//! Emulate x86 LLVM intrinsics

use crate::intrinsics::*;
use crate::prelude::*;

use rustc_middle::ty::subst::SubstsRef;

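/// Lowers an x86-specific LLVM intrinsic call to Cranelift IR, writing the
/// result to `ret` and jumping to `target` when one is given.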
pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: &str,
    _substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    target: Option<BasicBlock>,
) {
    match intrinsic {
        "llvm.x86.sse2.pause" | "llvm.aarch64.isb" => {
            // Spin loop hint
        }

        // Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
        "llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd" => {
            intrinsic_args!(fx, args => (a); intrinsic);

            let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
            let lane_ty = fx.clif_type(lane_ty).unwrap();
            assert!(lane_count <= 32);

            let mut res = fx.bcx.ins().iconst(types::I32, 0);

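            // Pack the sign bit of every lane into the low bits of `res`.
            // Walking the lanes in reverse and shifting left each iteration
            // leaves lane 0 in bit 0, as the movemask intrinsics require.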
            for lane in (0..lane_count).rev() {
                let a_lane = a.value_lane(fx, lane).load_scalar(fx);

                // cast float to int
                let a_lane = match lane_ty {
                    types::F32 => fx.bcx.ins().bitcast(types::I32, a_lane),
                    types::F64 => fx.bcx.ins().bitcast(types::I64, a_lane),
                    _ => a_lane,
                };

                // extract sign bit of an int
                let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_ty.bits() - 1));

                // shift sign bit into result
                let a_lane_sign = clif_intcast(fx, a_lane_sign, types::I32, false);
                res = fx.bcx.ins().ishl_imm(res, 1);
                res = fx.bcx.ins().bor(res, a_lane_sign);
            }

            let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
            ret.write_cvalue(fx, res);
        }
53 "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
54 let (x, y, kind) = match args {
55 [x, y, kind] => (x, y, kind),
56 _ => bug!("wrong number of args for intrinsic {intrinsic}"),
57 };
58 let x = codegen_operand(fx, x);
59 let y = codegen_operand(fx, y);
60 let kind = crate::constant::mir_operand_get_const_val(fx, kind)
61 .expect("llvm.x86.sse2.cmp.* kind not const");
62
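            // The `kind` immediate selects the comparison predicate, following the
            // CMPPS/CMPPD encoding: 0 = EQ, 1 = LT, 2 = LE, 3 = UNORD, 4 = NEQ,
            // 5 = NLT (unordered or >=), 6 = NLE (unordered or >), 7 = ORD.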
            let flt_cc = match kind
                .try_to_bits(Size::from_bytes(1))
                .unwrap_or_else(|| panic!("kind not scalar: {:?}", kind))
            {
                0 => FloatCC::Equal,
                1 => FloatCC::LessThan,
                2 => FloatCC::LessThanOrEqual,
                7 => FloatCC::Ordered,
                3 => FloatCC::Unordered,
                4 => FloatCC::NotEqual,
                5 => FloatCC::UnorderedOrGreaterThanOrEqual,
                6 => FloatCC::UnorderedOrGreaterThan,
                kind => unreachable!("kind {:?}", kind),
            };

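            // Each lane comparison produces an all-ones or all-zeros mask, matching
            // what the SSE compare instructions return per lane.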
            simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
                let res_lane = match lane_ty.kind() {
                    ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_ty),
                };
                bool_to_zero_or_max_uint(fx, res_lane_ty, res_lane)
            });
        }
        "llvm.x86.sse2.psrli.d" => {
            let (a, imm8) = match args {
                [a, imm8] => (a, imm8),
                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
            };
            let a = codegen_operand(fx, a);
            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
                .expect("llvm.x86.sse2.psrli.d imm8 not const");

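            // PSRLD semantics: a shift count of 32 or more clears the lane to zero
            // instead of wrapping.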
            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
                .try_to_bits(Size::from_bytes(4))
                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
            {
                imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
                _ => fx.bcx.ins().iconst(types::I32, 0),
            });
        }
        "llvm.x86.sse2.pslli.d" => {
            let (a, imm8) = match args {
                [a, imm8] => (a, imm8),
                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
            };
            let a = codegen_operand(fx, a);
            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
                .expect("llvm.x86.sse2.pslli.d imm8 not const");

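            // PSLLD semantics: a shift count of 32 or more clears the lane to zero.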
            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
                .try_to_bits(Size::from_bytes(4))
                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
            {
                imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
                _ => fx.bcx.ins().iconst(types::I32, 0),
            });
        }
        "llvm.x86.sse2.storeu.dq" => {
            intrinsic_args!(fx, args => (mem_addr, a); intrinsic);
            let mem_addr = mem_addr.load_scalar(fx);

            // FIXME: handle the unaligned store correctly
            let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
            dest.write_cvalue(fx, a);
        }
        "llvm.x86.addcarry.64" => {
            intrinsic_args!(fx, args => (c_in, a, b); intrinsic);
            let c_in = c_in.load_scalar(fx);

            llvm_add_sub(fx, BinOp::Add, ret, c_in, a, b);
        }
        "llvm.x86.subborrow.64" => {
            intrinsic_args!(fx, args => (b_in, a, b); intrinsic);
            let b_in = b_in.load_scalar(fx);

            llvm_add_sub(fx, BinOp::Sub, ret, b_in, a, b);
        }
        _ => {
            fx.tcx.sess.warn(&format!(
                "unsupported x86 llvm intrinsic {}; replacing with trap",
                intrinsic
            ));
            crate::trap::trap_unimplemented(fx, intrinsic);
            return;
        }
    }

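    // Unsupported intrinsics trap and return early above; everything else falls
    // through here and resumes at the caller-provided return block.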
    let dest = target.expect("all llvm intrinsics used by stdlib should return");
    let ret_block = fx.get_block(dest);
    fx.bcx.ins().jump(ret_block, &[]);
}

// llvm.x86.avx2.vperm2i128
// llvm.x86.ssse3.pshuf.b.128
// llvm.x86.avx2.pshuf.b
// llvm.x86.avx2.psrli.w
// llvm.x86.sse2.psrli.w

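/// Shared implementation of `llvm.x86.addcarry.64` and `llvm.x86.subborrow.64`:
/// combines `a` and `b` with the incoming carry/borrow bit `cb_in` and writes the
/// pair `(carry_out: u8, result: u64)` to `ret`.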
fn llvm_add_sub<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    bin_op: BinOp,
    ret: CPlace<'tcx>,
    cb_in: Value,
    a: CValue<'tcx>,
    b: CValue<'tcx>,
) {
    assert_eq!(
        a.layout().ty,
        fx.tcx.types.u64,
        "llvm.x86.addcarry.64/llvm.x86.subborrow.64 second operand must be u64"
    );
    assert_eq!(
        b.layout().ty,
        fx.tcx.types.u64,
        "llvm.x86.addcarry.64/llvm.x86.subborrow.64 third operand must be u64"
    );

    // a + b (or a - b) -> partial result `c` plus the first intermediate carry/borrow `cb0`
    let int0 = crate::num::codegen_checked_int_binop(fx, bin_op, a, b);
    let c = int0.value_field(fx, mir::Field::new(0));
    let cb0 = int0.value_field(fx, mir::Field::new(1)).load_scalar(fx);

    // c + carry_in (or c - borrow_in) -> final result plus the second intermediate carry/borrow `cb1`
    let cb_in_as_u64 = fx.bcx.ins().uextend(types::I64, cb_in);
    let cb_in_as_u64 = CValue::by_val(cb_in_as_u64, fx.layout_of(fx.tcx.types.u64));
    let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_u64);
    let (c, cb1) = int1.load_scalar_pair(fx);

    // Either intermediate carry/borrow sets the final carry/borrow flag.
    let cb_out = fx.bcx.ins().bor(cb0, cb1);

    let layout = fx.layout_of(fx.tcx.mk_tup([fx.tcx.types.u8, fx.tcx.types.u64].iter()));
    let val = CValue::by_val_pair(cb_out, c, layout);
    ret.write_cvalue(fx, val);
}