// compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs (upstream rustc 1.60.0)
//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.

mod cpuid;
mod llvm;
mod simd;

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;

use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_span::symbol::{kw, sym};

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;

macro intrinsic_pat {
    (_) => {
        _
    },
    ($name:ident) => {
        sym::$name
    },
    (kw.$name:ident) => {
        kw::$name
    },
    ($name:literal) => {
        $name
    },
}
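
// Illustrative expansions (not part of the macro): `intrinsic_pat!(likely)` becomes
// `sym::likely`, `intrinsic_pat!(kw.Try)` becomes `kw::Try`, and `intrinsic_pat!(_)`
// stays the wildcard pattern `_`.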

macro intrinsic_arg {
    (o $fx:expr, $arg:ident) => {
        $arg
    },
    (c $fx:expr, $arg:ident) => {
        codegen_operand($fx, $arg)
    },
    (v $fx:expr, $arg:ident) => {
        codegen_operand($fx, $arg).load_scalar($fx)
    }
}
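
// The `o`/`c`/`v` selectors above choose how an argument reaches an intrinsic body:
// `o` passes the raw `mir::Operand` through, `c` lowers it to a `CValue`, and `v`
// additionally loads it as a single scalar `Value`.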

macro intrinsic_substs {
    ($substs:expr, $index:expr,) => {},
    ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
        let $first = $substs.type_at($index);
        intrinsic_substs!($substs, $index+1, $($rest),*);
    }
}

macro intrinsic_match {
    ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr,
    _ => $unknown:block;
    $(
        $($($name:tt).*)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
    )*) => {
        let _ = $substs; // Silence warning when substs is unused.
        match $intrinsic {
            $(
                $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
                    #[allow(unused_parens, non_snake_case)]
                    {
                        $(
                            intrinsic_substs!($substs, 0, $($subst),*);
                        )?
                        if let [$($arg),*] = $args {
                            let ($($arg,)*) = (
                                $(intrinsic_arg!($a $fx, $arg),)*
                            );
                            #[warn(unused_parens, non_snake_case)]
                            {
                                $content
                            }
                        } else {
                            bug!("wrong number of args for intrinsic {:?}", $intrinsic);
                        }
                    }
                }
            )*
            _ => $unknown,
        }
    }
}
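
// A hypothetical arm, for illustration only:
//
//     intrinsic_match! {
//         fx, intrinsic, substs, args,
//         _ => { /* fallback */ };
//         rotate_left, <T>(v x, v y) { /* body sees x and y as scalar Values */ };
//     }
//
// matches `sym::rotate_left`, binds `T` via `substs.type_at(0)`, and loads both
// operands through `intrinsic_arg!(v ...)` before running the body.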

macro call_intrinsic_match {
    ($fx:expr, $intrinsic:expr, $substs:expr, $ret:expr, $destination:expr, $args:expr, $(
        $name:ident($($arg:ident),*) -> $ty:ident => $func:ident,
    )*) => {
        match $intrinsic {
            $(
                sym::$name => {
                    assert!($substs.is_empty());
                    if let [$(ref $arg),*] = *$args {
                        let ($($arg,)*) = (
                            $(codegen_operand($fx, $arg),)*
                        );
                        let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.tcx.types.$ty);
                        $ret.write_cvalue($fx, res);

                        if let Some((_, dest)) = $destination {
                            let ret_block = $fx.get_block(dest);
                            $fx.bcx.ins().jump(ret_block, &[]);
                            return;
                        } else {
                            unreachable!();
                        }
                    } else {
                        bug!("wrong number of args for intrinsic {:?}", $intrinsic);
                    }
                }
            )*
            _ => {}
        }
    }
}
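
// Each `name(args) -> ty => func` row (see the invocation in `codegen_intrinsic_call`
// below) lowers the intrinsic to a call to the named libm or compiler-builtins symbol,
// e.g. `sqrtf32(flt) -> f32 => sqrtf` emits a call to `sqrtf` returning `f32`.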

macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
    match $ty.kind() {
        ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
        _ => {
            $fx.tcx.sess.span_err(
                $span,
                &format!(
                    "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
                    $intrinsic, $ty
                ),
            );
            // Prevent verifier error
            crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
            return;
        }
    }
}

macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
    if !$ty.is_simd() {
        $fx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
        // Prevent verifier error
        crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
        return;
    }
}

pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match layout.abi {
        Abi::Vector { element, count } => (element, count),
        _ => unreachable!(),
    };

    match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
        _ => None,
    }
}

fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: impl Fn(
        &mut FunctionCx<'_, '_, 'tcx>,
        TyAndLayout<'tcx>,
        TyAndLayout<'tcx>,
        Value,
    ) -> CValue<'tcx>,
) {
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout, ret_lane_layout, lane);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: impl Fn(
        &mut FunctionCx<'_, '_, 'tcx>,
        TyAndLayout<'tcx>,
        TyAndLayout<'tcx>,
        Value,
        Value,
    ) -> CValue<'tcx>,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    acc: Option<Value>,
    ret: CPlace<'tcx>,
    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, TyAndLayout<'tcx>, Value, Value) -> Value,
) {
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let (mut res_val, start_lane) =
        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
    for lane_idx in start_lane..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        res_val = f(fx, lane_layout, res_val, lane);
    }
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
}
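
// A hypothetical call, for illustration: `simd_reduce(fx, v, None, ret, |fx, _, a, b|
// fx.bcx.ins().iadd(a, b))` folds the lanes of `v` with integer addition, seeding the
// accumulator from lane 0 when no explicit `acc` is passed.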

// FIXME move all uses to `simd_reduce`
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_lane(fx, 0).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    }
    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
        fx.bcx.ins().ireduce(types::I8, res_val)
    } else {
        res_val
    };
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
}

fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    layout: TyAndLayout<'tcx>,
    val: Value,
) -> CValue<'tcx> {
    let ty = fx.clif_type(layout.ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
    };

    let val = fx.bcx.ins().bint(int_ty, val);
    let mut res = fx.bcx.ins().ineg(val);

    if ty.is_float() {
        res = fx.bcx.ins().bitcast(ty, res);
    }

    CValue::by_val(res, layout)
}
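
// `bint` widens the bool to 0 or 1 and `ineg` turns that into 0 or -1 (all bits set),
// producing the all-zeros/all-ones lane masks that the `simd_*` comparisons return;
// e.g. a `true` lane of type `i32` becomes `-1 = 0xFFFF_FFFF`.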

macro simd_cmp {
    ($fx:expr, $cc:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        let vector_ty = clif_vector_type($fx.tcx, $x.layout());

        if let Some(vector_ty) = vector_ty {
            let x = $x.load_scalar($fx);
            let y = $y.load_scalar($fx);
            let val = if vector_ty.lane_type().is_float() {
                $fx.bcx.ins().fcmp(FloatCC::$cc_f, x, y)
            } else {
                $fx.bcx.ins().icmp(IntCC::$cc, x, y)
            };

            // HACK This depends on the fact that icmp for vectors represents bools as 0 and !0, not 0 and 1.
            let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);

            $ret.write_cvalue($fx, CValue::by_val(val, $ret.layout()));
        } else {
            simd_pair_for_each_lane(
                $fx,
                $x,
                $y,
                $ret,
                |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
                    let res_lane = match lane_layout.ty.kind() {
                        ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
                        ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
                        _ => unreachable!("{:?}", lane_layout.ty),
                    };
                    bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
                },
            );
        }
    },
    ($fx:expr, $cc_u:ident|$cc_s:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        // FIXME use vector icmp when possible
        simd_pair_for_each_lane(
            $fx,
            $x,
            $y,
            $ret,
            |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind() {
                    ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
                    ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                };
                bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
            },
        );
    },
}

macro simd_int_binop {
    ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
    },
    ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_pair_for_each_lane(
            $fx,
            $x,
            $y,
            $ret,
            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind() {
                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                };
                CValue::by_val(res_lane, ret_lane_layout)
            },
        );
    },
}

macro simd_int_flt_binop {
    ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
    },
    ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_pair_for_each_lane(
            $fx,
            $x,
            $y,
            $ret,
            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind() {
                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
                    ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                };
                CValue::by_val(res_lane, ret_lane_layout)
            },
        );
    },
}

macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
    simd_pair_for_each_lane(
        $fx,
        $x,
        $y,
        $ret,
        |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
            let res_lane = match lane_layout.ty.kind() {
                ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
                _ => unreachable!("{:?}", lane_layout.ty),
            };
            CValue::by_val(res_lane, ret_lane_layout)
        },
    );
}

pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: Option<(CPlace<'tcx>, BasicBlock)>,
    span: Span,
) {
    let intrinsic = fx.tcx.item_name(instance.def_id());
    let substs = instance.substs;

    let ret = match destination {
        Some((place, _)) => place,
        None => {
            // Insert non-returning intrinsics here
            match intrinsic {
                sym::abort => {
                    trap_abort(fx, "Called intrinsic::abort.");
                }
                sym::transmute => {
                    crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", span);
                }
                _ => unimplemented!("unsupported intrinsic {}", intrinsic),
            }
            return;
        }
    };

    if intrinsic.as_str().starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
        let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
        fx.bcx.ins().jump(ret_block, &[]);
        return;
    }

    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    call_intrinsic_match! {
        fx, intrinsic, substs, ret, destination, args,
        expf32(flt) -> f32 => expf,
        expf64(flt) -> f64 => exp,
        exp2f32(flt) -> f32 => exp2f,
        exp2f64(flt) -> f64 => exp2,
        sqrtf32(flt) -> f32 => sqrtf,
        sqrtf64(flt) -> f64 => sqrt,
        powif32(a, x) -> f32 => __powisf2, // compiler-builtins
        powif64(a, x) -> f64 => __powidf2, // compiler-builtins
        powf32(a, x) -> f32 => powf,
        powf64(a, x) -> f64 => pow,
        logf32(flt) -> f32 => logf,
        logf64(flt) -> f64 => log,
        log2f32(flt) -> f32 => log2f,
        log2f64(flt) -> f64 => log2,
        log10f32(flt) -> f32 => log10f,
        log10f64(flt) -> f64 => log10,
        fabsf32(flt) -> f32 => fabsf,
        fabsf64(flt) -> f64 => fabs,
        fmaf32(x, y, z) -> f32 => fmaf,
        fmaf64(x, y, z) -> f64 => fma,
        copysignf32(x, y) -> f32 => copysignf,
        copysignf64(x, y) -> f64 => copysign,

        // rounding variants
        // FIXME use clif insts
        floorf32(flt) -> f32 => floorf,
        floorf64(flt) -> f64 => floor,
        ceilf32(flt) -> f32 => ceilf,
        ceilf64(flt) -> f64 => ceil,
        truncf32(flt) -> f32 => truncf,
        truncf64(flt) -> f64 => trunc,
        roundf32(flt) -> f32 => roundf,
        roundf64(flt) -> f64 => round,

        // trigonometry
        sinf32(flt) -> f32 => sinf,
        sinf64(flt) -> f64 => sin,
        cosf32(flt) -> f32 => cosf,
        cosf64(flt) -> f64 => cos,
    }

    intrinsic_match! {
        fx, intrinsic, substs, args,
        _ => {
            fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
        };

        assume, (c _a) {};
        likely | unlikely, (c a) {
            ret.write_cvalue(fx, a);
        };
        breakpoint, () {
            fx.bcx.ins().debugtrap();
        };
        copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount = if elem_size != 1 {
                fx.bcx.ins().imul_imm(count, elem_size as i64)
            } else {
                count
            };

            if intrinsic == sym::copy_nonoverlapping {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        };
        // NOTE: the volatile variants have src and dst swapped
        volatile_copy_memory | volatile_copy_nonoverlapping_memory, <elem_ty> (v dst, v src, v count) {
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount = if elem_size != 1 {
                fx.bcx.ins().imul_imm(count, elem_size as i64)
            } else {
                count
            };

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        };
        size_of_val, <T> (c ptr) {
            let layout = fx.layout_of(T);
            let size = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                size
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        };
        min_align_of_val, <T> (c ptr) {
            let layout = fx.layout_of(T);
            let align = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                align
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        };

        unchecked_add | unchecked_sub | unchecked_div | exact_div | unchecked_rem
        | unchecked_shl | unchecked_shr, (c x, c y) {
            // FIXME trap on overflow
            let bin_op = match intrinsic {
                sym::unchecked_add => BinOp::Add,
                sym::unchecked_sub => BinOp::Sub,
                sym::unchecked_div | sym::exact_div => BinOp::Div,
                sym::unchecked_rem => BinOp::Rem,
                sym::unchecked_shl => BinOp::Shl,
                sym::unchecked_shr => BinOp::Shr,
                _ => unreachable!(),
            };
            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        };
        add_with_overflow | sub_with_overflow | mul_with_overflow, (c x, c y) {
            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                sym::add_with_overflow => BinOp::Add,
                sym::sub_with_overflow => BinOp::Sub,
                sym::mul_with_overflow => BinOp::Mul,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_checked_int_binop(
                fx,
                bin_op,
                x,
                y,
            );
            ret.write_cvalue(fx, res);
        };
        saturating_add | saturating_sub, <T> (c lhs, c rhs) {
            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,
                _ => unreachable!(),
            };

            let signed = type_sign(T);

            let checked_res = crate::num::codegen_checked_int_binop(
                fx,
                bin_op,
                lhs,
                rhs,
            );

            let (val, has_overflow) = checked_res.load_scalar_pair(fx);
            let clif_ty = fx.clif_type(T).unwrap();

            let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);

            let val = match (intrinsic, signed) {
                (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
                (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
                (sym::saturating_add, true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                }
                (sym::saturating_sub, true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                }
                _ => unreachable!(),
            };

            let res = CValue::by_val(val, fx.layout_of(T));

            ret.write_cvalue(fx, res);
        };
        rotate_left, <T>(v x, v y) {
            let layout = fx.layout_of(T);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        };
        rotate_right, <T>(v x, v y) {
            let layout = fx.layout_of(T);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        };

        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
        // doesn't have UB, both are codegen'ed the same way.
        offset | arith_offset, (c base, v offset) {
            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            } else {
                offset
            };
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        };
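        // E.g. for the arm above: offsetting a `*const u32` by 3 scales by
        // `size_of::<u32>() == 4`, adding 12 bytes to the pointer value.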

        transmute, (c from) {
            ret.write_cvalue_transmute(fx, from);
        };
        write_bytes | volatile_set_memory, (c dst, v val, v count) {
            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            } else {
                count
            };
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
        };
        ctlz | ctlz_nonzero, <T> (v arg) {
            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = fx.bcx.ins().clz(arg);
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        cttz | cttz_nonzero, <T> (v arg) {
            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = fx.bcx.ins().ctz(arg);
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        ctpop, <T> (v arg) {
            let res = fx.bcx.ins().popcnt(arg);
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        bitreverse, <T> (v arg) {
            let res = fx.bcx.ins().bitrev(arg);
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        bswap, <T> (v arg) {
            // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
            fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                match bcx.func.dfg.value_type(v) {
                    types::I8 => v,

                    // https://code.woboq.org/gcc/include/bits/byteswap.h.html
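                    // Worked example for the I16 case below: v = 0xBEEF gives
                    // (0xBEEF << 8) & 0xFF00 = 0xEF00 and (0xBEEF >> 8) & 0x00FF = 0x00BE,
                    // which OR together to the byte-swapped 0xEFBE.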
                    types::I16 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 8);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00);

                        let tmp2 = bcx.ins().ushr_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF);

                        bcx.ins().bor(n1, n2)
                    }
                    types::I32 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 24);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);

                        let tmp2 = bcx.ins().ishl_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);

                        let tmp3 = bcx.ins().ushr_imm(v, 8);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);

                        let tmp4 = bcx.ins().ushr_imm(v, 24);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        bcx.ins().bor(or_tmp1, or_tmp2)
                    }
                    types::I64 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 56);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);

                        let tmp2 = bcx.ins().ishl_imm(v, 40);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);

                        let tmp3 = bcx.ins().ishl_imm(v, 24);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);

                        let tmp4 = bcx.ins().ishl_imm(v, 8);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);

                        let tmp5 = bcx.ins().ushr_imm(v, 8);
                        let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);

                        let tmp6 = bcx.ins().ushr_imm(v, 24);
                        let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);

                        let tmp7 = bcx.ins().ushr_imm(v, 40);
                        let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);

                        let tmp8 = bcx.ins().ushr_imm(v, 56);
                        let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        let or_tmp3 = bcx.ins().bor(n5, n6);
                        let or_tmp4 = bcx.ins().bor(n7, n8);

                        let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
                        let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
                        bcx.ins().bor(or_tmp5, or_tmp6)
                    }
                    types::I128 => {
                        let (lo, hi) = bcx.ins().isplit(v);
                        let lo = swap(bcx, lo);
                        let hi = swap(bcx, hi);
                        bcx.ins().iconcat(hi, lo)
                    }
                    ty => unreachable!("bswap {}", ty),
                }
            }
            let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
            let layout = fx.layout_of(T);
            if layout.abi.is_uninhabited() {
                with_no_trimmed_paths(|| crate::base::codegen_panic(
                    fx,
                    &format!("attempted to instantiate uninhabited type `{}`", T),
                    span,
                ));
                return;
            }

            if intrinsic == sym::assert_zero_valid && !layout.might_permit_raw_init(fx, /*zero:*/ true) {
                with_no_trimmed_paths(|| crate::base::codegen_panic(
                    fx,
                    &format!("attempted to zero-initialize type `{}`, which is invalid", T),
                    span,
                ));
                return;
            }

            if intrinsic == sym::assert_uninit_valid && !layout.might_permit_raw_init(fx, /*zero:*/ false) {
                with_no_trimmed_paths(|| crate::base::codegen_panic(
                    fx,
                    &format!("attempted to leave type `{}` uninitialized, which is invalid", T),
                    span,
                ));
                return;
            }
        };

        volatile_load | unaligned_volatile_load, (c ptr) {
            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout =
                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        };
        volatile_store | unaligned_volatile_store, (v ptr, c val) {
            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        };

        pref_align_of | needs_drop | type_id | type_name | variant_count, () {
            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(
                fx,
                const_val,
                ret.layout().ty,
            );
            ret.write_cvalue(fx, val);
        };

        ptr_offset_from, <T> (v ptr, v base) {
            let isize_layout = fx.layout_of(fx.tcx.types.isize);

            let pointee_size: u64 = fx.layout_of(T).size.bytes();
            let diff = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
            ret.write_cvalue(fx, val);
        };

        ptr_guaranteed_eq, (c a, c b) {
            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
            ret.write_cvalue(fx, val);
        };

        ptr_guaranteed_ne, (c a, c b) {
            let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
            ret.write_cvalue(fx, val);
        };

        caller_location, () {
            let caller_location = fx.get_caller_location(span);
            ret.write_cvalue(fx, caller_location);
        };

        _ if intrinsic.as_str().starts_with("atomic_fence"), () {
            fx.bcx.ins().fence();
        };
        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence"), () {
            // FIXME use a compiler fence once Cranelift supports it
            fx.bcx.ins().fence();
        };
        _ if intrinsic.as_str().starts_with("atomic_load"), <T> (v ptr) {
            validate_atomic_type!(fx, intrinsic, span, T);
            let ty = fx.clif_type(T).unwrap();

            let val = fx.bcx.ins().atomic_load(ty, MemFlags::trusted(), ptr);

            let val = CValue::by_val(val, fx.layout_of(T));
            ret.write_cvalue(fx, val);
        };
        _ if intrinsic.as_str().starts_with("atomic_store"), (v ptr, c val) {
            validate_atomic_type!(fx, intrinsic, span, val.layout().ty);

            let val = val.load_scalar(fx);

            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
        };
        _ if intrinsic.as_str().starts_with("atomic_xchg"), (v ptr, c new) {
            let layout = new.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
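        // `atomic_cxchg*` (including the weak variants) returns a pair of the old value
        // and a success flag, mirroring `compare_exchange` in the standard library.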
        _ if intrinsic.as_str().starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
            let layout = new.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);

            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);

            let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
            ret.write_cvalue(fx, ret_val)
        };

        _ if intrinsic.as_str().starts_with("atomic_xadd"), (v ptr, c amount) {
            let layout = amount.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_xsub"), (v ptr, c amount) {
            let layout = amount.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_and"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_or"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_xor"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_nand"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_max"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_umax"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_min"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_umin"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };

        // In Rust, floating point min and max don't propagate NaN; in Cranelift they do.
        // For this reason it is necessary to use `a.is_nan() ? b : (a >= b ? b : a)` for `minnumf*`
        // and `a.is_nan() ? b : (a <= b ? b : a)` for `maxnumf*`. NaN checks are done by comparing
        // a float against itself: only NaN is not equal to itself.
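        // E.g. `minnumf32(NaN, 1.0)`: `a != a` holds, so `b = 1.0` is returned;
        // `minnumf32(2.0, 1.0)`: `a >= b` holds, so `b = 1.0` is returned as well.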
        minnumf32, (v a, v b) {
            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
            let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
            let temp = fx.bcx.ins().select(a_ge_b, b, a);
            let val = fx.bcx.ins().select(a_is_nan, b, temp);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        };
        minnumf64, (v a, v b) {
            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
            let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
            let temp = fx.bcx.ins().select(a_ge_b, b, a);
            let val = fx.bcx.ins().select(a_is_nan, b, temp);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        };
        maxnumf32, (v a, v b) {
            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
            let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
            let temp = fx.bcx.ins().select(a_le_b, b, a);
            let val = fx.bcx.ins().select(a_is_nan, b, temp);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        };
        maxnumf64, (v a, v b) {
            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
            let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
            let temp = fx.bcx.ins().select(a_le_b, b, a);
            let val = fx.bcx.ins().select(a_is_nan, b, temp);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        };

        kw.Try, (v f, v data, v _catch_fn) {
            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: fx.target_config.default_call_conv,
                params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
                returns: vec![],
            });

            fx.bcx.ins().call_indirect(f_sig, f, &[data]);

            let layout = ret.layout();
            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
            ret.write_cvalue(fx, ret_val);
        };

        fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
            let res = crate::num::codegen_float_binop(fx, match intrinsic {
                sym::fadd_fast => BinOp::Add,
                sym::fsub_fast => BinOp::Sub,
                sym::fmul_fast => BinOp::Mul,
                sym::fdiv_fast => BinOp::Div,
                sym::frem_fast => BinOp::Rem,
                _ => unreachable!(),
            }, x, y);
            ret.write_cvalue(fx, res);
        };
        float_to_int_unchecked, (v f) {
            let res = crate::cast::clif_int_or_float_cast(
                fx,
                f,
                false,
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            );
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        };

        raw_eq, <T>(v lhs_ref, v rhs_ref) {
            fn type_by_size(size: Size) -> Option<Type> {
                Type::int(size.bits().try_into().ok()?)
            }
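            // Note: `Type::int` only returns a type for the integer widths Cranelift
            // supports (8, 16, 32, 64 and 128 bits), so any other size falls through
            // to the `memcmp` path below.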

            let size = fx.layout_of(T).layout.size;
            // FIXME add and use emit_small_memcmp
            let is_eq_value =
                if size == Size::ZERO {
                    // No bytes means they're trivially equal
                    fx.bcx.ins().iconst(types::I8, 1)
                } else if let Some(clty) = type_by_size(size) {
                    // Can't use `trusted` for these loads; they could be unaligned.
                    let mut flags = MemFlags::new();
                    flags.set_notrap();
                    let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
                    let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
                    let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
                    fx.bcx.ins().bint(types::I8, eq)
                } else {
                    // Just call `memcmp` (like slices do in core) when the
                    // size is too large or it's not a power-of-two.
                    let signed_bytes = i64::try_from(size.bytes()).unwrap();
                    let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
                    let params = vec![AbiParam::new(fx.pointer_type); 3];
                    let returns = vec![AbiParam::new(types::I32)];
                    let args = &[lhs_ref, rhs_ref, bytes_val];
                    let cmp = fx.lib_call("memcmp", params, returns, args)[0];
                    let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
                    fx.bcx.ins().bint(types::I8, eq)
                };
            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
        };

        black_box, (c a) {
            // FIXME implement black_box semantics
            ret.write_cvalue(fx, a);
        };
    }

    if let Some((_, dest)) = destination {
        let ret_block = fx.get_block(dest);
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
    }
}