//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.

macro_rules! intrinsic_args {
    ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
        #[allow(unused_parens)]
        let ($($arg),*) = if let [$($arg),*] = $args {
            ($(codegen_operand($fx, $arg)),*)
        } else {
            $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
        };
    }
}
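// Illustrative expansion of the macro above: `intrinsic_args!(fx, args => (a, b); intrinsic);`
// matches `args` against the slice pattern `[a, b]`, binds `a` and `b` to the `CValue`s
// produced by `codegen_operand`, and calls `bug_on_incorrect_arg_count(intrinsic)` for any
// other arity.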

mod cpuid;
mod llvm;
mod simd;

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;

use rustc_middle::ty::layout::{HasParamEnv, ValidityRequirement};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
use rustc_middle::ty::GenericArgsRef;
use rustc_span::symbol::{kw, sym, Symbol};

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;

fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
    bug!("wrong number of args for intrinsic {}", intrinsic);
}

fn report_atomic_type_validation_error<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    span: Span,
    ty: Ty<'tcx>,
) {
    fx.tcx.sess.span_err(
        span,
        format!(
            "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
            intrinsic, ty
        ),
    );
    // Prevent verifier error
    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}

pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Type {
    let (element, count) = match layout.abi {
        Abi::Vector { element, count } => (element, count),
        _ => unreachable!(),
    };

    scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()).unwrap()
}
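// For example, a 4-lane `f32` SIMD layout (`Abi::Vector { element: f32, count: 4 }`)
// maps to the Cranelift vector type `F32X4`.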

fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
) {
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}
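// Hypothetical usage sketch for `simd_for_each_lane`: a lane-wise integer negation could be
// `simd_for_each_lane(fx, val, ret, &|fx, _lane_ty, _ret_lane_ty, lane| fx.bcx.ins().ineg(lane));`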

fn simd_pair_for_each_lane_typed<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, CValue<'tcx>, CValue<'tcx>) -> CValue<'tcx>,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let (ret_lane_count, _ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx);
        let y_lane = y.value_lane(fx, lane_idx);

        let res_lane = f(fx, x_lane, y_lane);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    acc: Option<Value>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let (mut res_val, start_lane) =
        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
    for lane_idx in start_lane..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        res_val = f(fx, lane_layout.ty, res_val, lane);
    }
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
}
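// Hypothetical usage sketch for `simd_reduce`: a horizontal integer sum with no accumulator
// could be `simd_reduce(fx, val, None, ret, &|fx, _ty, a, b| fx.bcx.ins().iadd(a, b));`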

// FIXME move all uses to `simd_reduce`
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_lane(fx, 0).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    }
    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
        fx.bcx.ins().ireduce(types::I8, res_val)
    } else {
        res_val
    };
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
}

fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    ty: Ty<'tcx>,
    val: Value,
) -> Value {
    let ty = fx.clif_type(ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
    };

    let mut res = fx.bcx.ins().bmask(int_ty, val);

    if ty.is_float() {
        res = codegen_bitcast(fx, ty, res);
    }

    res
}
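// `bmask` yields all ones for a non-zero input and all zeros otherwise, e.g. a boolean `true`
// becomes `0xFFFF_FFFF` as an `I32`; for float result types the mask is built in the integer
// type of the same width and then bitcast back.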

pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: CPlace<'tcx>,
    target: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let intrinsic = fx.tcx.item_name(instance.def_id());
    let instance_args = instance.args;

    if intrinsic.as_str().starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(
            fx,
            intrinsic,
            instance_args,
            args,
            destination,
            target.expect("target for simd intrinsic"),
            source_info.span,
        );
    } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
        let ret_block = fx.get_block(target.expect("target for float intrinsic"));
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        codegen_regular_intrinsic_call(
            fx,
            instance,
            intrinsic,
            instance_args,
            args,
            destination,
            target,
            source_info,
        );
    }
}

fn codegen_float_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
) -> bool {
    let (name, arg_count, ty, clif_ty) = match intrinsic {
        sym::expf32 => ("expf", 1, fx.tcx.types.f32, types::F32),
        sym::expf64 => ("exp", 1, fx.tcx.types.f64, types::F64),
        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32, types::F32),
        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64, types::F64),
        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32, types::F32),
        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64, types::F64),
        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32, types::F32), // compiler-builtins
        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64, types::F64), // compiler-builtins
        sym::powf32 => ("powf", 2, fx.tcx.types.f32, types::F32),
        sym::powf64 => ("pow", 2, fx.tcx.types.f64, types::F64),
        sym::logf32 => ("logf", 1, fx.tcx.types.f32, types::F32),
        sym::logf64 => ("log", 1, fx.tcx.types.f64, types::F64),
        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32, types::F32),
        sym::log2f64 => ("log2", 1, fx.tcx.types.f64, types::F64),
        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32, types::F32),
        sym::log10f64 => ("log10", 1, fx.tcx.types.f64, types::F64),
        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32, types::F32),
        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64, types::F64),
        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32, types::F32),
        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64, types::F64),
        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32, types::F32),
        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64, types::F64),
        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32, types::F32),
        sym::floorf64 => ("floor", 1, fx.tcx.types.f64, types::F64),
        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32, types::F32),
        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64, types::F64),
        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32, types::F32),
        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64, types::F64),
        sym::rintf32 => ("rintf", 1, fx.tcx.types.f32, types::F32),
        sym::rintf64 => ("rint", 1, fx.tcx.types.f64, types::F64),
        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32, types::F32),
        sym::roundf64 => ("round", 1, fx.tcx.types.f64, types::F64),
        sym::roundevenf32 => ("roundevenf", 1, fx.tcx.types.f32, types::F32),
        sym::roundevenf64 => ("roundeven", 1, fx.tcx.types.f64, types::F64),
        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32, types::F32),
        sym::sinf64 => ("sin", 1, fx.tcx.types.f64, types::F64),
        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32, types::F32),
        sym::cosf64 => ("cos", 1, fx.tcx.types.f64, types::F64),
        _ => return false,
    };

    if args.len() != arg_count {
        bug!("wrong number of args for intrinsic {:?}", intrinsic);
    }

    let (a, b, c);
    let args = match args {
        [x] => {
            a = [codegen_operand(fx, x).load_scalar(fx)];
            &a as &[_]
        }
        [x, y] => {
            b = [codegen_operand(fx, x).load_scalar(fx), codegen_operand(fx, y).load_scalar(fx)];
            &b
        }
        [x, y, z] => {
            c = [
                codegen_operand(fx, x).load_scalar(fx),
                codegen_operand(fx, y).load_scalar(fx),
                codegen_operand(fx, z).load_scalar(fx),
            ];
            &c
        }
        _ => unreachable!(),
    };

    let layout = fx.layout_of(ty);
    let res = match intrinsic {
        sym::fmaf32 | sym::fmaf64 => {
            CValue::by_val(fx.bcx.ins().fma(args[0], args[1], args[2]), layout)
        }
        sym::copysignf32 | sym::copysignf64 => {
            CValue::by_val(fx.bcx.ins().fcopysign(args[0], args[1]), layout)
        }
        sym::fabsf32
        | sym::fabsf64
        | sym::floorf32
        | sym::floorf64
        | sym::ceilf32
        | sym::ceilf64
        | sym::truncf32
        | sym::truncf64 => {
            let val = match intrinsic {
                sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(args[0]),
                sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(args[0]),
                sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(args[0]),
                sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(args[0]),
                _ => unreachable!(),
            };

            CValue::by_val(val, layout)
        }
        // These intrinsics aren't supported natively by Cranelift.
        // Lower them to a libcall.
        sym::powif32 | sym::powif64 => {
            let input_tys: Vec<_> = vec![AbiParam::new(clif_ty), AbiParam::new(types::I32)];
            let ret_val = fx.lib_call(name, input_tys, vec![AbiParam::new(clif_ty)], &args)[0];
            CValue::by_val(ret_val, fx.layout_of(ty))
        }
        _ => {
            let input_tys: Vec<_> = args.iter().map(|_| AbiParam::new(clif_ty)).collect();
            let ret_val = fx.lib_call(name, input_tys, vec![AbiParam::new(clif_ty)], &args)[0];
            CValue::by_val(ret_val, fx.layout_of(ty))
        }
    };

    ret.write_cvalue(fx, res);

    true
}
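// For example, `sqrtf32` is lowered here to a libcall to `sqrtf` with signature `(F32) -> F32`,
// while `powif32` calls compiler-builtins' `__powisf2` with signature `(F32, I32) -> F32`.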

fn codegen_regular_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    intrinsic: Symbol,
    generic_args: GenericArgsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    destination: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    match intrinsic {
        sym::abort => {
            fx.bcx.ins().trap(TrapCode::User(0));
            return;
        }
        sym::likely | sym::unlikely => {
            intrinsic_args!(fx, args => (a); intrinsic);

            ret.write_cvalue(fx, a);
        }
        sym::breakpoint => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().debugtrap();
        }
        sym::copy => {
            intrinsic_args!(fx, args => (src, dst, count); intrinsic);
            let src = src.load_scalar(fx);
            let dst = dst.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = generic_args.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            // FIXME emit_small_memmove
            fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
        }
        sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
            // NOTE: the volatile variants have src and dst swapped
            intrinsic_args!(fx, args => (dst, src, count); intrinsic);
            let dst = dst.load_scalar(fx);
            let src = src.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = generic_args.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::size_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(generic_args.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                size
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }
        sym::min_align_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(generic_args.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                align
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }
        sym::vtable_size => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let size = crate::vtable::size_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }
        sym::vtable_align => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let align = crate::vtable::min_align_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }
        sym::exact_div => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            // FIXME trap on inexact
            let res = crate::num::codegen_int_binop(fx, BinOp::Div, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::saturating_add | sym::saturating_sub => {
            intrinsic_args!(fx, args => (lhs, rhs); intrinsic);

            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_saturating_int_binop(fx, bin_op, lhs, rhs);
            ret.write_cvalue(fx, res);
        }
        sym::rotate_left => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }
        sym::rotate_right => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }
        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
        // doesn't have UB both are codegen'ed the same way
        sym::arith_offset => {
            intrinsic_args!(fx, args => (base, offset); intrinsic);
            let offset = offset.load_scalar(fx);

            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            } else {
                offset
            };
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        }
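        // Worked example: `arith_offset` by 3 on a `*const u32` scales the offset by the
        // 4-byte pointee size, so the emitted code computes `base + 3 * 4 = base + 12`.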
        sym::ptr_mask => {
            intrinsic_args!(fx, args => (ptr, mask); intrinsic);
            let ptr_layout = ptr.layout();
            let ptr = ptr.load_scalar(fx);
            let mask = mask.load_scalar(fx);
            // Write the masked pointer to the return place instead of discarding it.
            let res = fx.bcx.ins().band(ptr, mask);
            ret.write_cvalue(fx, CValue::by_val(res, ptr_layout));
        }
        sym::write_bytes | sym::volatile_set_memory => {
            intrinsic_args!(fx, args => (dst, val, count); intrinsic);
            let val = val.load_scalar(fx);
            let count = count.load_scalar(fx);

            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            } else {
                count
            };
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
        }
        sym::ctlz | sym::ctlz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = fx.bcx.ins().clz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::cttz | sym::cttz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = fx.bcx.ins().ctz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::ctpop => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().popcnt(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bitreverse => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().bitrev(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bswap => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = if fx.bcx.func.dfg.value_type(val) == types::I8 {
                val
            } else {
                fx.bcx.ins().bswap(val)
            };
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_mem_uninitialized_valid => {
            intrinsic_args!(fx, args => (); intrinsic);

            let ty = generic_args.type_at(0);

            let requirement = ValidityRequirement::from_intrinsic(intrinsic);

            if let Some(requirement) = requirement {
                let do_panic = !fx
                    .tcx
                    .check_validity_requirement((requirement, fx.param_env().and(ty)))
                    .expect("expect to have layout during codegen");

                if do_panic {
                    let layout = fx.layout_of(ty);
                    let msg_str = with_no_visible_paths!({
                        with_no_trimmed_paths!({
                            if layout.abi.is_uninhabited() {
                                // Use this error even for the other intrinsics as it is more precise.
                                format!("attempted to instantiate uninhabited type `{}`", ty)
                            } else if intrinsic == sym::assert_zero_valid {
                                format!(
                                    "attempted to zero-initialize type `{}`, which is invalid",
                                    ty
                                )
                            } else {
                                format!(
                                    "attempted to leave type `{}` uninitialized, which is invalid",
                                    ty
                                )
                            }
                        })
                    });
                    crate::base::codegen_panic_nounwind(fx, &msg_str, source_info);
                    return;
                }
            }
        }
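        // For example (illustrative), `mem::zeroed::<&u8>()` triggers `assert_zero_valid` and
        // panics at runtime with: attempted to zero-initialize type `&u8`, which is invalid.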
        sym::volatile_load | sym::unaligned_volatile_load => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        }
        sym::volatile_store | sym::unaligned_volatile_store | sym::nontemporal_store => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            // FIXME actually do nontemporal stores if requested
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        }
        sym::pref_align_of
        | sym::needs_drop
        | sym::type_id
        | sym::type_name
        | sym::variant_count => {
            intrinsic_args!(fx, args => (); intrinsic);

            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
            ret.write_cvalue(fx, val);
        }
        sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
            intrinsic_args!(fx, args => (ptr, base); intrinsic);
            let ptr = ptr.load_scalar(fx);
            let base = base.load_scalar(fx);
            let ty = generic_args.type_at(0);

            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
            let diff_bytes = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = if intrinsic == sym::ptr_offset_from_unsigned {
                let usize_layout = fx.layout_of(fx.tcx.types.usize);
                // Because diff_bytes ULE isize::MAX, this would be fine as signed,
                // but unsigned is slightly easier to codegen, so might as well.
                CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
            } else {
                let isize_layout = fx.layout_of(fx.tcx.types.isize);
                CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
            };
            ret.write_cvalue(fx, val);
        }
        sym::ptr_guaranteed_cmp => {
            intrinsic_args!(fx, args => (a, b); intrinsic);

            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b).load_scalar(fx);
            ret.write_cvalue(fx, CValue::by_val(val, fx.layout_of(fx.tcx.types.u8)));
        }
        sym::caller_location => {
            intrinsic_args!(fx, args => (); intrinsic);

            let caller_location = fx.get_caller_location(source_info);
            ret.write_cvalue(fx, caller_location);
        }
        _ if intrinsic.as_str().starts_with("atomic_fence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            // FIXME use a compiler fence once Cranelift supports it
            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_load") => {
            intrinsic_args!(fx, args => (ptr); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = generic_args.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }
            let clif_ty = fx.clif_type(ty).unwrap();

            let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);

            let val = CValue::by_val(val, fx.layout_of(ty));
            ret.write_cvalue(fx, val);
        }
        _ if intrinsic.as_str().starts_with("atomic_store") => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = generic_args.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }

            let val = val.load_scalar(fx);

            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
        }
        _ if intrinsic.as_str().starts_with("atomic_xchg") => {
            intrinsic_args!(fx, args => (ptr, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
            // both atomic_cxchg_* and atomic_cxchgweak_*
            intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }

            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);

            let ret_val = CValue::by_val_pair(old, is_eq, ret.layout());
            ret.write_cvalue(fx, ret_val)
        }
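        // The returned pair matches `compare_exchange`-style semantics: the old value observed
        // at `ptr` plus a success flag. Note that the weak variant is lowered to the same strong
        // `atomic_cas`, which is a valid (if conservative) implementation.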
        _ if intrinsic.as_str().starts_with("atomic_xadd") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xsub") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_and") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_or") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xor") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_nand") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_max") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umax") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_min") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umin") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        sym::minnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::minnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }
        kw::Try => {
            intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
            let f = f.load_scalar(fx);
            let data = data.load_scalar(fx);
            let _catch_fn = catch_fn.load_scalar(fx);

            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: fx.target_config.default_call_conv,
                params: vec![AbiParam::new(pointer_ty(fx.tcx))],
                returns: vec![],
            });

            fx.bcx.ins().call_indirect(f_sig, f, &[data]);

            let layout = fx.layout_of(fx.tcx.types.i32);
            let ret_val = CValue::by_val(fx.bcx.ins().iconst(types::I32, 0), layout);
            ret.write_cvalue(fx, ret_val);
        }
        sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            let res = crate::num::codegen_float_binop(
                fx,
                match intrinsic {
                    sym::fadd_fast => BinOp::Add,
                    sym::fsub_fast => BinOp::Sub,
                    sym::fmul_fast => BinOp::Mul,
                    sym::fdiv_fast => BinOp::Div,
                    sym::frem_fast => BinOp::Rem,
                    _ => unreachable!(),
                },
                x,
                y,
            );
            ret.write_cvalue(fx, res);
        }
        sym::float_to_int_unchecked => {
            intrinsic_args!(fx, args => (f); intrinsic);
            let f = f.load_scalar(fx);

            let res = crate::cast::clif_int_or_float_cast(
                fx,
                f,
                false,
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            );
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        }
        sym::raw_eq => {
            intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
            let lhs_ref = lhs_ref.load_scalar(fx);
            let rhs_ref = rhs_ref.load_scalar(fx);

            let size = fx.layout_of(generic_args.type_at(0)).layout.size();
            // FIXME add and use emit_small_memcmp
            let is_eq_value = if size == Size::ZERO {
                // No bytes means they're trivially equal
                fx.bcx.ins().iconst(types::I8, 1)
            } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
                // Can't use `trusted` for these loads; they could be unaligned.
                let mut flags = MemFlags::new();
                flags.set_notrap();
                let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
                let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
                fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val)
            } else {
                // Just call `memcmp` (like slices do in core) when the
                // size is too large or it's not a power-of-two.
                let signed_bytes = i64::try_from(size.bytes()).unwrap();
                let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
                let params = vec![AbiParam::new(fx.pointer_type); 3];
                let returns = vec![AbiParam::new(types::I32)];
                let args = &[lhs_ref, rhs_ref, bytes_val];
                let cmp = fx.lib_call("memcmp", params, returns, args)[0];
                fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0)
            };
            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
        }
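        // Decision sketch: a 4-byte `raw_eq` emits one `I32` load per side plus an `icmp`,
        // while a 24-byte value (not a legal integer width) falls back to the `memcmp` libcall.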
        sym::compare_bytes => {
            intrinsic_args!(fx, args => (lhs_ptr, rhs_ptr, bytes_val); intrinsic);
            let lhs_ptr = lhs_ptr.load_scalar(fx);
            let rhs_ptr = rhs_ptr.load_scalar(fx);
            let bytes_val = bytes_val.load_scalar(fx);

            let params = vec![AbiParam::new(fx.pointer_type); 3];
            let returns = vec![AbiParam::new(types::I32)];
            let args = &[lhs_ptr, rhs_ptr, bytes_val];
            // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
            let cmp = fx.lib_call("memcmp", params, returns, args)[0];
            ret.write_cvalue(fx, CValue::by_val(cmp, ret.layout()));
        }
        sym::const_allocate => {
            intrinsic_args!(fx, args => (_size, _align); intrinsic);

            // returns a null pointer at runtime.
            let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
            ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
        }
        sym::const_deallocate => {
            intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);

            // nop at runtime.
        }
        sym::black_box => {
            intrinsic_args!(fx, args => (a); intrinsic);

            // FIXME implement black_box semantics
            ret.write_cvalue(fx, a);
        }
        // FIXME implement variadics in cranelift
        sym::va_copy | sym::va_arg | sym::va_end => {
            fx.tcx.sess.span_fatal(
                source_info.span,
                "Defining variadic functions is not yet supported by Cranelift",
            );
        }

        _ => {
            fx.tcx
                .sess
                .span_fatal(source_info.span, format!("unsupported intrinsic {}", intrinsic));
        }
    }

    let ret_block = fx.get_block(destination.unwrap());
    fx.bcx.ins().jump(ret_block, &[]);
}