//! Emulate LLVM intrinsics
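//!
//! These are target-specific LLVM intrinsics that `core::arch` code calls directly.
//! Anything not matched below falls through to the catch-all arm, which emits a warning
//! and replaces the call with a trap.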

use crate::intrinsics::*;
use crate::prelude::*;

use rustc_middle::ty::subst::SubstsRef;

pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: &str,
    _substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    target: Option<BasicBlock>,
) {
    intrinsic_match! {
        fx, intrinsic, args,
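        // Argument binding modes used by the arms below, as inferred from how this file
        // uses them: `c` binds the argument as a `CValue`, `v` loads it as a single
        // Cranelift `Value`, and `o` keeps the raw `mir::Operand` so a constant can be
        // read back with `crate::constant::mir_operand_get_const_val`.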
        _ => {
            fx.tcx.sess.warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
            crate::trap::trap_unimplemented(fx, intrinsic);
        };

        // Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
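        // The loop below walks the lanes from highest to lowest and shifts each lane's
        // sign bit into the result, so lane 0 ends up in bit 0. A rough scalar sketch of
        // the intended semantics (hypothetical helper, not part of this file):
        //
        //     fn movemask(lanes: &[i8]) -> i32 {
        //         lanes.iter().rev().fold(0, |acc, &l| (acc << 1) | ((l as u8) >> 7) as i32)
        //     }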
        "llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd", (c a) {
            let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
            let lane_ty = fx.clif_type(lane_ty).unwrap();
            assert!(lane_count <= 32);

            let mut res = fx.bcx.ins().iconst(types::I32, 0);

            for lane in (0..lane_count).rev() {
                let a_lane = a.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);

                // cast float to int
                let a_lane = match lane_ty {
                    types::F32 => fx.bcx.ins().bitcast(types::I32, a_lane),
                    types::F64 => fx.bcx.ins().bitcast(types::I64, a_lane),
                    _ => a_lane,
                };

                // extract sign bit of an int
                let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_ty.bits() - 1));

                // shift sign bit into result
                let a_lane_sign = clif_intcast(fx, a_lane_sign, types::I32, false);
                res = fx.bcx.ins().ishl_imm(res, 1);
                res = fx.bcx.ins().bor(res, a_lane_sign);
            }

            let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
            ret.write_cvalue(fx, res);
        };
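        // The `kind` operand of `llvm.x86.sse2.cmp.ps`/`.pd` is an immediate selecting the
        // comparison predicate; the handled encodings are mapped to Cranelift `FloatCC`
        // codes below. Each result lane is all ones when the comparison holds and all
        // zeros otherwise, roughly (scalar sketch with a hypothetical helper):
        //
        //     fn cmp_lane_lt(x: f64, y: f64) -> u64 {
        //         if x < y { u64::MAX } else { 0 }
        //     }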
        "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd", (c x, c y, o kind) {
            let kind = crate::constant::mir_operand_get_const_val(fx, kind).expect("llvm.x86.sse2.cmp.* kind not const");
            let flt_cc = match kind.try_to_bits(Size::from_bytes(1)).unwrap_or_else(|| panic!("kind not scalar: {:?}", kind)) {
                0 => FloatCC::Equal,
                1 => FloatCC::LessThan,
                2 => FloatCC::LessThanOrEqual,
                7 => {
                    unimplemented!("Compares corresponding elements in `a` and `b` to see if neither is `NaN`.");
                }
                3 => {
                    unimplemented!("Compares corresponding elements in `a` and `b` to see if either is `NaN`.");
                }
                4 => FloatCC::NotEqual,
                5 => {
                    unimplemented!("not less than");
                }
                6 => {
                    unimplemented!("not less than or equal");
                }
                kind => unreachable!("kind {:?}", kind),
            };

            simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
                let res_lane = match lane_ty.kind() {
                    ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_ty),
                };
                bool_to_zero_or_max_uint(fx, res_lane_ty, res_lane)
            });
        };
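        // For the immediate shifts below, x86 zeroes a lane whenever the shift amount is
        // at least the lane width (32 bits here), so the emulation special-cases
        // `imm8 >= 32` rather than relying on the shift instruction's own masking of the
        // amount. A scalar sketch (hypothetical helper):
        //
        //     fn psrli_d(lane: u32, imm8: u32) -> u32 {
        //         if imm8 < 32 { lane >> imm8 } else { 0 }
        //     }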
        "llvm.x86.sse2.psrli.d", (c a, o imm8) {
            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
                match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
                    imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
                    _ => fx.bcx.ins().iconst(types::I32, 0),
                }
            });
        };
        "llvm.x86.sse2.pslli.d", (c a, o imm8) {
            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.pslli.d imm8 not const");
            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
                match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
                    imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
                    _ => fx.bcx.ins().iconst(types::I32, 0),
                }
            });
        };
        "llvm.x86.sse2.storeu.dq", (v mem_addr, c a) {
            // FIXME correctly handle the unaligned case
            let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
            dest.write_cvalue(fx, a);
        };
        "llvm.x86.addcarry.64", (v c_in, c a, c b) {
            llvm_add_sub(
                fx,
                BinOp::Add,
                ret,
                c_in,
                a,
                b
            );
        };
        "llvm.x86.subborrow.64", (v b_in, c a, c b) {
            llvm_add_sub(
                fx,
                BinOp::Sub,
                ret,
                b_in,
                a,
                b
            );
        };
    }

    let dest = target.expect("all llvm intrinsics used by stdlib should return");
    let ret_block = fx.get_block(dest);
    fx.bcx.ins().jump(ret_block, &[]);
}

// LLVM intrinsics not implemented yet; they currently hit the trap fallback above:
// llvm.x86.avx2.vperm2i128
// llvm.x86.ssse3.pshuf.b.128
// llvm.x86.avx2.pshuf.b
// llvm.x86.avx2.psrli.w
// llvm.x86.sse2.psrli.w

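// Shared lowering for `llvm.x86.addcarry.64` and `llvm.x86.subborrow.64`: two checked
// 64-bit operations are chained and their carry/borrow flags ORed together, yielding the
// `(flag, value)` pair the intrinsics return. A rough scalar sketch of the add case
// (hypothetical helper, assuming the same semantics):
//
//     fn addcarry_u64(c_in: u8, a: u64, b: u64) -> (u8, u64) {
//         let (t, carry0) = a.overflowing_add(b);
//         let (sum, carry1) = t.overflowing_add(c_in as u64);
//         ((carry0 | carry1) as u8, sum)
//     }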
fn llvm_add_sub<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    bin_op: BinOp,
    ret: CPlace<'tcx>,
    cb_in: Value,
    a: CValue<'tcx>,
    b: CValue<'tcx>,
) {
    assert_eq!(
        a.layout().ty,
        fx.tcx.types.u64,
        "llvm.x86.addcarry.64/llvm.x86.subborrow.64 second operand must be u64"
    );
    assert_eq!(
        b.layout().ty,
        fx.tcx.types.u64,
        "llvm.x86.addcarry.64/llvm.x86.subborrow.64 third operand must be u64"
    );

    // a +/- b -> the 64-bit result c plus the first intermediate carry or borrow
    let int0 = crate::num::codegen_checked_int_binop(fx, bin_op, a, b);
    let c = int0.value_field(fx, mir::Field::new(0));
    let cb0 = int0.value_field(fx, mir::Field::new(1)).load_scalar(fx);

    // c +/- carry-in -> the final 64-bit result c plus the second intermediate carry or borrow
    let cb_in_as_u64 = fx.bcx.ins().uextend(types::I64, cb_in);
    let cb_in_as_u64 = CValue::by_val(cb_in_as_u64, fx.layout_of(fx.tcx.types.u64));
    let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_u64);
    let (c, cb1) = int1.load_scalar_pair(fx);

    // carry0 | carry1 -> the final carry or borrow flag
    let cb_out = fx.bcx.ins().bor(cb0, cb1);

    let layout = fx.layout_of(fx.tcx.mk_tup([fx.tcx.types.u8, fx.tcx.types.u64].iter()));
    let val = CValue::by_val_pair(cb_out, c, layout);
    ret.write_cvalue(fx, val);
}