/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch vector helper functions.
 *
 * Copyright (c) 2022-2023 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
#include "internals.h"
#include "tcg/tcg.h"
#include "vec.h"
#include "tcg/tcg-gvec-desc.h"

#define DO_ADD(a, b) (a + b)
#define DO_SUB(a, b) (a - b)

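/*
 * DO_ODD_EVEN expands to the "horizontal" widening helpers: each
 * destination element of width BIT is produced from the odd-indexed
 * source element Vj->E2(2 * i + 1) and the even-indexed element
 * Vk->E2(2 * i), both widened to the destination type TD first.
 */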
#define DO_ODD_EVEN(NAME, BIT, E1, E2, DO_OP)                        \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)       \
{                                                                    \
    int i;                                                           \
    VReg *Vd = (VReg *)vd;                                           \
    VReg *Vj = (VReg *)vj;                                           \
    VReg *Vk = (VReg *)vk;                                           \
    typedef __typeof(Vd->E1(0)) TD;                                  \
    int oprsz = simd_oprsz(desc);                                    \
                                                                     \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                        \
        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i)); \
    }                                                                \
}

DO_ODD_EVEN(vhaddw_h_b, 16, H, B, DO_ADD)
DO_ODD_EVEN(vhaddw_w_h, 32, W, H, DO_ADD)
DO_ODD_EVEN(vhaddw_d_w, 64, D, W, DO_ADD)

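/*
 * The 128-bit (q_d / qu_du) variants cannot use the macro: they widen
 * 64-bit elements into Int128 with int128_makes64() (sign-extend) or
 * int128_make64() (zero-extend) and use the Int128 arithmetic helpers.
 */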
void HELPER(vhaddw_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_makes64(Vj->D(2 * i + 1)),
                              int128_makes64(Vk->D(2 * i)));
    }
}

DO_ODD_EVEN(vhsubw_h_b, 16, H, B, DO_SUB)
DO_ODD_EVEN(vhsubw_w_h, 32, W, H, DO_SUB)
DO_ODD_EVEN(vhsubw_d_w, 64, D, W, DO_SUB)

void HELPER(vhsubw_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_makes64(Vj->D(2 * i + 1)),
                              int128_makes64(Vk->D(2 * i)));
    }
}

DO_ODD_EVEN(vhaddw_hu_bu, 16, UH, UB, DO_ADD)
DO_ODD_EVEN(vhaddw_wu_hu, 32, UW, UH, DO_ADD)
DO_ODD_EVEN(vhaddw_du_wu, 64, UD, UW, DO_ADD)

void HELPER(vhaddw_qu_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i + 1)),
                              int128_make64(Vk->UD(2 * i)));
    }
}

DO_ODD_EVEN(vhsubw_hu_bu, 16, UH, UB, DO_SUB)
DO_ODD_EVEN(vhsubw_wu_hu, 32, UW, UH, DO_SUB)
DO_ODD_EVEN(vhsubw_du_wu, 64, UD, UW, DO_SUB)

void HELPER(vhsubw_qu_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_make64(Vj->UD(2 * i + 1)),
                              int128_make64(Vk->UD(2 * i)));
    }
}

#define DO_EVEN(NAME, BIT, E1, E2, DO_OP)                        \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)   \
{                                                                \
    int i;                                                       \
    VReg *Vd = (VReg *)vd;                                       \
    VReg *Vj = (VReg *)vj;                                       \
    VReg *Vk = (VReg *)vk;                                       \
    typedef __typeof(Vd->E1(0)) TD;                              \
    int oprsz = simd_oprsz(desc);                                \
                                                                 \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                    \
        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i)); \
    }                                                            \
}

#define DO_ODD(NAME, BIT, E1, E2, DO_OP)                                 \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)           \
{                                                                        \
    int i;                                                               \
    VReg *Vd = (VReg *)vd;                                               \
    VReg *Vj = (VReg *)vj;                                               \
    VReg *Vk = (VReg *)vk;                                               \
    typedef __typeof(Vd->E1(0)) TD;                                      \
    int oprsz = simd_oprsz(desc);                                        \
                                                                         \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                            \
        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i + 1)); \
    }                                                                    \
}

void HELPER(vaddwev_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_makes64(Vj->D(2 * i)),
                              int128_makes64(Vk->D(2 * i)));
    }
}

DO_EVEN(vaddwev_h_b, 16, H, B, DO_ADD)
DO_EVEN(vaddwev_w_h, 32, W, H, DO_ADD)
DO_EVEN(vaddwev_d_w, 64, D, W, DO_ADD)

void HELPER(vaddwod_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_makes64(Vj->D(2 * i + 1)),
                              int128_makes64(Vk->D(2 * i + 1)));
    }
}

DO_ODD(vaddwod_h_b, 16, H, B, DO_ADD)
DO_ODD(vaddwod_w_h, 32, W, H, DO_ADD)
DO_ODD(vaddwod_d_w, 64, D, W, DO_ADD)

void HELPER(vsubwev_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_makes64(Vj->D(2 * i)),
                              int128_makes64(Vk->D(2 * i)));
    }
}

DO_EVEN(vsubwev_h_b, 16, H, B, DO_SUB)
DO_EVEN(vsubwev_w_h, 32, W, H, DO_SUB)
DO_EVEN(vsubwev_d_w, 64, D, W, DO_SUB)

void HELPER(vsubwod_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_makes64(Vj->D(2 * i + 1)),
                              int128_makes64(Vk->D(2 * i + 1)));
    }
}

DO_ODD(vsubwod_h_b, 16, H, B, DO_SUB)
DO_ODD(vsubwod_w_h, 32, W, H, DO_SUB)
DO_ODD(vsubwod_d_w, 64, D, W, DO_SUB)

void HELPER(vaddwev_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i)),
                              int128_make64(Vk->UD(2 * i)));
    }
}

DO_EVEN(vaddwev_h_bu, 16, UH, UB, DO_ADD)
DO_EVEN(vaddwev_w_hu, 32, UW, UH, DO_ADD)
DO_EVEN(vaddwev_d_wu, 64, UD, UW, DO_ADD)

void HELPER(vaddwod_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i + 1)),
                              int128_make64(Vk->UD(2 * i + 1)));
    }
}

DO_ODD(vaddwod_h_bu, 16, UH, UB, DO_ADD)
DO_ODD(vaddwod_w_hu, 32, UW, UH, DO_ADD)
DO_ODD(vaddwod_d_wu, 64, UD, UW, DO_ADD)

void HELPER(vsubwev_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_make64(Vj->UD(2 * i)),
                              int128_make64(Vk->UD(2 * i)));
    }
}

DO_EVEN(vsubwev_h_bu, 16, UH, UB, DO_SUB)
DO_EVEN(vsubwev_w_hu, 32, UW, UH, DO_SUB)
DO_EVEN(vsubwev_d_wu, 64, UD, UW, DO_SUB)

void HELPER(vsubwod_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_make64(Vj->UD(2 * i + 1)),
                              int128_make64(Vk->UD(2 * i + 1)));
    }
}

DO_ODD(vsubwod_h_bu, 16, UH, UB, DO_SUB)
DO_ODD(vsubwod_w_hu, 32, UW, UH, DO_SUB)
DO_ODD(vsubwod_d_wu, 64, UD, UW, DO_SUB)

#define DO_EVEN_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)             \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)        \
{                                                                     \
    int i;                                                            \
    VReg *Vd = (VReg *)vd;                                            \
    VReg *Vj = (VReg *)vj;                                            \
    VReg *Vk = (VReg *)vk;                                            \
    typedef __typeof(Vd->ES1(0)) TDS;                                 \
    typedef __typeof(Vd->EU1(0)) TDU;                                 \
    int oprsz = simd_oprsz(desc);                                     \
                                                                      \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                         \
        Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i), (TDS)Vk->ES2(2 * i)); \
    }                                                                 \
}

#define DO_ODD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)                      \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)                \
{                                                                             \
    int i;                                                                    \
    VReg *Vd = (VReg *)vd;                                                    \
    VReg *Vj = (VReg *)vj;                                                    \
    VReg *Vk = (VReg *)vk;                                                    \
    typedef __typeof(Vd->ES1(0)) TDS;                                         \
    typedef __typeof(Vd->EU1(0)) TDU;                                         \
    int oprsz = simd_oprsz(desc);                                             \
                                                                              \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                                 \
        Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i + 1), (TDS)Vk->ES2(2 * i + 1)); \
    }                                                                         \
}

void HELPER(vaddwev_q_du_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i)),
                              int128_makes64(Vk->D(2 * i)));
    }
}

DO_EVEN_U_S(vaddwev_h_bu_b, 16, H, UH, B, UB, DO_ADD)
DO_EVEN_U_S(vaddwev_w_hu_h, 32, W, UW, H, UH, DO_ADD)
DO_EVEN_U_S(vaddwev_d_wu_w, 64, D, UD, W, UW, DO_ADD)

void HELPER(vaddwod_q_du_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i + 1)),
                              int128_makes64(Vk->D(2 * i + 1)));
    }
}

DO_ODD_U_S(vaddwod_h_bu_b, 16, H, UH, B, UB, DO_ADD)
DO_ODD_U_S(vaddwod_w_hu_h, 32, W, UW, H, UH, DO_ADD)
DO_ODD_U_S(vaddwod_d_wu_w, 64, D, UD, W, UW, DO_ADD)

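/*
 * Overflow-free averaging: compute a / 2 + b / 2 and add back a carry
 * for the two discarded low bits.  DO_VAVG (a & b & 1) truncates the
 * average toward minus infinity; DO_VAVGR ((a | b) & 1) rounds it up.
 */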
#define DO_VAVG(a, b)  ((a >> 1) + (b >> 1) + (a & b & 1))
#define DO_VAVGR(a, b) ((a >> 1) + (b >> 1) + ((a | b) & 1))

#define DO_3OP(NAME, BIT, E, DO_OP)                            \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i));                  \
    }                                                          \
}

DO_3OP(vavg_b, 8, B, DO_VAVG)
DO_3OP(vavg_h, 16, H, DO_VAVG)
DO_3OP(vavg_w, 32, W, DO_VAVG)
DO_3OP(vavg_d, 64, D, DO_VAVG)
DO_3OP(vavgr_b, 8, B, DO_VAVGR)
DO_3OP(vavgr_h, 16, H, DO_VAVGR)
DO_3OP(vavgr_w, 32, W, DO_VAVGR)
DO_3OP(vavgr_d, 64, D, DO_VAVGR)
DO_3OP(vavg_bu, 8, UB, DO_VAVG)
DO_3OP(vavg_hu, 16, UH, DO_VAVG)
DO_3OP(vavg_wu, 32, UW, DO_VAVG)
DO_3OP(vavg_du, 64, UD, DO_VAVG)
DO_3OP(vavgr_bu, 8, UB, DO_VAVGR)
DO_3OP(vavgr_hu, 16, UH, DO_VAVGR)
DO_3OP(vavgr_wu, 32, UW, DO_VAVGR)
DO_3OP(vavgr_du, 64, UD, DO_VAVGR)

#define DO_VABSD(a, b) ((a > b) ? (a - b) : (b - a))

DO_3OP(vabsd_b, 8, B, DO_VABSD)
DO_3OP(vabsd_h, 16, H, DO_VABSD)
DO_3OP(vabsd_w, 32, W, DO_VABSD)
DO_3OP(vabsd_d, 64, D, DO_VABSD)
DO_3OP(vabsd_bu, 8, UB, DO_VABSD)
DO_3OP(vabsd_hu, 16, UH, DO_VABSD)
DO_3OP(vabsd_wu, 32, UW, DO_VABSD)
DO_3OP(vabsd_du, 64, UD, DO_VABSD)

#define DO_VABS(a) ((a < 0) ? (-a) : (a))

#define DO_VADDA(NAME, BIT, E)                                 \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->E(i) = DO_VABS(Vj->E(i)) + DO_VABS(Vk->E(i));      \
    }                                                          \
}

DO_VADDA(vadda_b, 8, B)
DO_VADDA(vadda_h, 16, H)
DO_VADDA(vadda_w, 32, W)
DO_VADDA(vadda_d, 64, D)

#define DO_MIN(a, b) (a < b ? a : b)
#define DO_MAX(a, b) (a > b ? a : b)

#define VMINMAXI(NAME, BIT, E, DO_OP)                              \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
{                                                                  \
    int i;                                                         \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
    typedef __typeof(Vd->E(0)) TD;                                 \
    int oprsz = simd_oprsz(desc);                                  \
                                                                   \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                      \
        Vd->E(i) = DO_OP(Vj->E(i), (TD)imm);                       \
    }                                                              \
}

VMINMAXI(vmini_b, 8, B, DO_MIN)
VMINMAXI(vmini_h, 16, H, DO_MIN)
VMINMAXI(vmini_w, 32, W, DO_MIN)
VMINMAXI(vmini_d, 64, D, DO_MIN)
VMINMAXI(vmaxi_b, 8, B, DO_MAX)
VMINMAXI(vmaxi_h, 16, H, DO_MAX)
VMINMAXI(vmaxi_w, 32, W, DO_MAX)
VMINMAXI(vmaxi_d, 64, D, DO_MAX)
VMINMAXI(vmini_bu, 8, UB, DO_MIN)
VMINMAXI(vmini_hu, 16, UH, DO_MIN)
VMINMAXI(vmini_wu, 32, UW, DO_MIN)
VMINMAXI(vmini_du, 64, UD, DO_MIN)
VMINMAXI(vmaxi_bu, 8, UB, DO_MAX)
VMINMAXI(vmaxi_hu, 16, UH, DO_MAX)
VMINMAXI(vmaxi_wu, 32, UW, DO_MAX)
VMINMAXI(vmaxi_du, 64, UD, DO_MAX)

#define DO_VMUH(NAME, BIT, E1, E2, DO_OP)                      \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    typedef __typeof(Vd->E1(0)) T;                             \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->E2(i) = ((T)Vj->E2(i)) * ((T)Vk->E2(i)) >> BIT;    \
    }                                                          \
}

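/*
 * DO_VMUH widens both operands to type T (twice the element width),
 * multiplies, and keeps the high half.  The 64-bit variants below use
 * the muls64()/mulu64() host helpers instead, since there is no wider
 * native integer type to widen into.
 */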
void HELPER(vmuh_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    uint64_t l, h;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 8; i++) {
        muls64(&l, &h, Vj->D(i), Vk->D(i));
        Vd->D(i) = h;
    }
}

DO_VMUH(vmuh_b, 8, H, B, DO_MUH)
DO_VMUH(vmuh_h, 16, W, H, DO_MUH)
DO_VMUH(vmuh_w, 32, D, W, DO_MUH)

void HELPER(vmuh_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    uint64_t l, h;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 8; i++) {
        mulu64(&l, &h, Vj->D(i), Vk->D(i));
        Vd->D(i) = h;
    }
}

DO_VMUH(vmuh_bu, 8, UH, UB, DO_MUH)
DO_VMUH(vmuh_hu, 16, UW, UH, DO_MUH)
DO_VMUH(vmuh_wu, 32, UD, UW, DO_MUH)

#define DO_MUL(a, b) (a * b)

DO_EVEN(vmulwev_h_b, 16, H, B, DO_MUL)
DO_EVEN(vmulwev_w_h, 32, W, H, DO_MUL)
DO_EVEN(vmulwev_d_w, 64, D, W, DO_MUL)

DO_ODD(vmulwod_h_b, 16, H, B, DO_MUL)
DO_ODD(vmulwod_w_h, 32, W, H, DO_MUL)
DO_ODD(vmulwod_d_w, 64, D, W, DO_MUL)

DO_EVEN(vmulwev_h_bu, 16, UH, UB, DO_MUL)
DO_EVEN(vmulwev_w_hu, 32, UW, UH, DO_MUL)
DO_EVEN(vmulwev_d_wu, 64, UD, UW, DO_MUL)

DO_ODD(vmulwod_h_bu, 16, UH, UB, DO_MUL)
DO_ODD(vmulwod_w_hu, 32, UW, UH, DO_MUL)
DO_ODD(vmulwod_d_wu, 64, UD, UW, DO_MUL)

DO_EVEN_U_S(vmulwev_h_bu_b, 16, H, UH, B, UB, DO_MUL)
DO_EVEN_U_S(vmulwev_w_hu_h, 32, W, UW, H, UH, DO_MUL)
DO_EVEN_U_S(vmulwev_d_wu_w, 64, D, UD, W, UW, DO_MUL)

DO_ODD_U_S(vmulwod_h_bu_b, 16, H, UH, B, UB, DO_MUL)
DO_ODD_U_S(vmulwod_w_hu_h, 32, W, UW, H, UH, DO_MUL)
DO_ODD_U_S(vmulwod_d_wu_w, 64, D, UD, W, UW, DO_MUL)

#define DO_MADD(a, b, c) (a + b * c)
#define DO_MSUB(a, b, c) (a - b * c)

#define VMADDSUB(NAME, BIT, E, DO_OP)                          \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->E(i) = DO_OP(Vd->E(i), Vj->E(i), Vk->E(i));        \
    }                                                          \
}

VMADDSUB(vmadd_b, 8, B, DO_MADD)
VMADDSUB(vmadd_h, 16, H, DO_MADD)
VMADDSUB(vmadd_w, 32, W, DO_MADD)
VMADDSUB(vmadd_d, 64, D, DO_MADD)
VMADDSUB(vmsub_b, 8, B, DO_MSUB)
VMADDSUB(vmsub_h, 16, H, DO_MSUB)
VMADDSUB(vmsub_w, 32, W, DO_MSUB)
VMADDSUB(vmsub_d, 64, D, DO_MSUB)

#define VMADDWEV(NAME, BIT, E1, E2, DO_OP)                        \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)    \
{                                                                 \
    int i;                                                        \
    VReg *Vd = (VReg *)vd;                                        \
    VReg *Vj = (VReg *)vj;                                        \
    VReg *Vk = (VReg *)vk;                                        \
    typedef __typeof(Vd->E1(0)) TD;                               \
    int oprsz = simd_oprsz(desc);                                 \
                                                                  \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                     \
        Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i)); \
    }                                                             \
}

VMADDWEV(vmaddwev_h_b, 16, H, B, DO_MUL)
VMADDWEV(vmaddwev_w_h, 32, W, H, DO_MUL)
VMADDWEV(vmaddwev_d_w, 64, D, W, DO_MUL)
VMADDWEV(vmaddwev_h_bu, 16, UH, UB, DO_MUL)
VMADDWEV(vmaddwev_w_hu, 32, UW, UH, DO_MUL)
VMADDWEV(vmaddwev_d_wu, 64, UD, UW, DO_MUL)

#define VMADDWOD(NAME, BIT, E1, E2, DO_OP)                     \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    typedef __typeof(Vd->E1(0)) TD;                            \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i + 1),              \
                           (TD)Vk->E2(2 * i + 1));             \
    }                                                          \
}

VMADDWOD(vmaddwod_h_b, 16, H, B, DO_MUL)
VMADDWOD(vmaddwod_w_h, 32, W, H, DO_MUL)
VMADDWOD(vmaddwod_d_w, 64, D, W, DO_MUL)
VMADDWOD(vmaddwod_h_bu, 16, UH, UB, DO_MUL)
VMADDWOD(vmaddwod_w_hu, 32, UW, UH, DO_MUL)
VMADDWOD(vmaddwod_d_wu, 64, UD, UW, DO_MUL)

#define VMADDWEV_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)     \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    typedef __typeof(Vd->ES1(0)) TS1;                          \
    typedef __typeof(Vd->EU1(0)) TU1;                          \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i),               \
                            (TS1)Vk->ES2(2 * i));              \
    }                                                          \
}

VMADDWEV_U_S(vmaddwev_h_bu_b, 16, H, UH, B, UB, DO_MUL)
VMADDWEV_U_S(vmaddwev_w_hu_h, 32, W, UW, H, UH, DO_MUL)
VMADDWEV_U_S(vmaddwev_d_wu_w, 64, D, UD, W, UW, DO_MUL)

#define VMADDWOD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)     \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    typedef __typeof(Vd->ES1(0)) TS1;                          \
    typedef __typeof(Vd->EU1(0)) TU1;                          \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i + 1),           \
                            (TS1)Vk->ES2(2 * i + 1));          \
    }                                                          \
}

VMADDWOD_U_S(vmaddwod_h_bu_b, 16, H, UH, B, UB, DO_MUL)
VMADDWOD_U_S(vmaddwod_w_hu_h, 32, W, UW, H, UH, DO_MUL)
VMADDWOD_U_S(vmaddwod_d_wu_w, 64, D, UD, W, UW, DO_MUL)

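/*
 * Division guards: any division or remainder by zero yields 0.  For the
 * signed cases, N == -N holds only for 0 and the type's minimum value,
 * so the second test makes MIN / -1 return MIN and MIN % -1 return 0
 * instead of overflowing.
 */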
#define DO_DIVU(N, M) (unlikely(M == 0) ? 0 : N / M)
#define DO_REMU(N, M) (unlikely(M == 0) ? 0 : N % M)
#define DO_DIV(N, M)  (unlikely(M == 0) ? 0 : \
        unlikely((N == -N) && (M == (__typeof(N))(-1))) ? N : N / M)
#define DO_REM(N, M)  (unlikely(M == 0) ? 0 : \
        unlikely((N == -N) && (M == (__typeof(N))(-1))) ? 0 : N % M)

#define VDIV(NAME, BIT, E, DO_OP)                              \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i));                  \
    }                                                          \
}

VDIV(vdiv_b, 8, B, DO_DIV)
VDIV(vdiv_h, 16, H, DO_DIV)
VDIV(vdiv_w, 32, W, DO_DIV)
VDIV(vdiv_d, 64, D, DO_DIV)
VDIV(vdiv_bu, 8, UB, DO_DIVU)
VDIV(vdiv_hu, 16, UH, DO_DIVU)
VDIV(vdiv_wu, 32, UW, DO_DIVU)
VDIV(vdiv_du, 64, UD, DO_DIVU)
VDIV(vmod_b, 8, B, DO_REM)
VDIV(vmod_h, 16, H, DO_REM)
VDIV(vmod_w, 32, W, DO_REM)
VDIV(vmod_d, 64, D, DO_REM)
VDIV(vmod_bu, 8, UB, DO_REMU)
VDIV(vmod_hu, 16, UH, DO_REMU)
VDIV(vmod_wu, 32, UW, DO_REMU)
VDIV(vmod_du, 64, UD, DO_REMU)

#define VSAT_S(NAME, BIT, E)                                       \
void HELPER(NAME)(void *vd, void *vj, uint64_t max, uint32_t desc) \
{                                                                  \
    int i;                                                         \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
    typedef __typeof(Vd->E(0)) TD;                                 \
    int oprsz = simd_oprsz(desc);                                  \
                                                                   \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                      \
        Vd->E(i) = Vj->E(i) > (TD)max ? (TD)max :                  \
                   Vj->E(i) < (TD)~max ? (TD)~max : Vj->E(i);      \
    }                                                              \
}

VSAT_S(vsat_b, 8, B)
VSAT_S(vsat_h, 16, H)
VSAT_S(vsat_w, 32, W)
VSAT_S(vsat_d, 64, D)

#define VSAT_U(NAME, BIT, E)                                       \
void HELPER(NAME)(void *vd, void *vj, uint64_t max, uint32_t desc) \
{                                                                  \
    int i;                                                         \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
    typedef __typeof(Vd->E(0)) TD;                                 \
    int oprsz = simd_oprsz(desc);                                  \
                                                                   \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                      \
        Vd->E(i) = Vj->E(i) > (TD)max ? (TD)max : Vj->E(i);        \
    }                                                              \
}

VSAT_U(vsat_bu, 8, UB)
VSAT_U(vsat_hu, 16, UH)
VSAT_U(vsat_wu, 32, UW)
VSAT_U(vsat_du, 64, UD)

#define VEXTH(NAME, BIT, E1, E2)                                 \
void HELPER(NAME)(void *vd, void *vj, uint32_t desc)             \
{                                                                \
    int i, j, ofs;                                               \
    VReg *Vd = (VReg *)vd;                                       \
    VReg *Vj = (VReg *)vj;                                       \
    int oprsz = simd_oprsz(desc);                                \
                                                                 \
    ofs = LSX_LEN / BIT;                                         \
    for (i = 0; i < oprsz / 16; i++) {                           \
        for (j = 0; j < ofs; j++) {                              \
            Vd->E1(j + i * ofs) = Vj->E2(j + ofs + ofs * 2 * i); \
        }                                                        \
    }                                                            \
}

void HELPER(vexth_q_d)(void *vd, void *vj, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_makes64(Vj->D(2 * i + 1));
    }
}

void HELPER(vexth_qu_du)(void *vd, void *vj, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_make64(Vj->UD(2 * i + 1));
    }
}

VEXTH(vexth_h_b, 16, H, B)
VEXTH(vexth_w_h, 32, W, H)
VEXTH(vexth_d_w, 64, D, W)
VEXTH(vexth_hu_bu, 16, UH, UB)
VEXTH(vexth_wu_hu, 32, UW, UH)
VEXTH(vexth_du_wu, 64, UD, UW)

#define VEXT2XV(NAME, BIT, E1, E2)                   \
void HELPER(NAME)(void *vd, void *vj, uint32_t desc) \
{                                                    \
    int i;                                           \
    VReg temp = {};                                  \
    VReg *Vd = (VReg *)vd;                           \
    VReg *Vj = (VReg *)vj;                           \
    int oprsz = simd_oprsz(desc);                    \
                                                     \
    for (i = 0; i < oprsz / (BIT / 8); i++) {        \
        temp.E1(i) = Vj->E2(i);                      \
    }                                                \
    *Vd = temp;                                      \
}

VEXT2XV(vext2xv_h_b, 16, H, B)
VEXT2XV(vext2xv_w_b, 32, W, B)
VEXT2XV(vext2xv_d_b, 64, D, B)
VEXT2XV(vext2xv_w_h, 32, W, H)
VEXT2XV(vext2xv_d_h, 64, D, H)
VEXT2XV(vext2xv_d_w, 64, D, W)
VEXT2XV(vext2xv_hu_bu, 16, UH, UB)
VEXT2XV(vext2xv_wu_bu, 32, UW, UB)
VEXT2XV(vext2xv_du_bu, 64, UD, UB)
VEXT2XV(vext2xv_wu_hu, 32, UW, UH)
VEXT2XV(vext2xv_du_hu, 64, UD, UH)
VEXT2XV(vext2xv_du_wu, 64, UD, UW)

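/* Sign-cover: copy b with the sign of a, or 0 when a is 0. */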
#define DO_SIGNCOV(a, b) (a == 0 ? 0 : a < 0 ? -b : b)

DO_3OP(vsigncov_b, 8, B, DO_SIGNCOV)
DO_3OP(vsigncov_h, 16, H, DO_SIGNCOV)
DO_3OP(vsigncov_w, 32, W, DO_SIGNCOV)
DO_3OP(vsigncov_d, 64, D, DO_SIGNCOV)

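/*
 * do_vmskltz_*: gather the per-element sign bits of one 64-bit lane
 * into the low bits of the result.  The value is masked to the sign-bit
 * positions, then folded down with shifts so that bit i of the return
 * value is the sign of element i.
 */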
static uint64_t do_vmskltz_b(int64_t val)
{
    uint64_t m = 0x8080808080808080ULL;
    uint64_t c = val & m;
    c |= c << 7;
    c |= c << 14;
    c |= c << 28;
    return c >> 56;
}

void HELPER(vmskltz_b)(void *vd, void *vj, uint32_t desc)
{
    uint16_t temp = 0;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    temp = do_vmskltz_b(Vj->D(0));
    temp |= (do_vmskltz_b(Vj->D(1)) << 8);
    Vd->D(0) = temp;
    Vd->D(1) = 0;
}

static uint64_t do_vmskltz_h(int64_t val)
{
    uint64_t m = 0x8000800080008000ULL;
    uint64_t c = val & m;
    c |= c << 15;
    c |= c << 30;
    return c >> 60;
}

void HELPER(vmskltz_h)(void *vd, void *vj, uint32_t desc)
{
    uint16_t temp = 0;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    temp = do_vmskltz_h(Vj->D(0));
    temp |= (do_vmskltz_h(Vj->D(1)) << 4);
    Vd->D(0) = temp;
    Vd->D(1) = 0;
}

static uint64_t do_vmskltz_w(int64_t val)
{
    uint64_t m = 0x8000000080000000ULL;
    uint64_t c = val & m;
    c |= c << 31;
    return c >> 62;
}

void HELPER(vmskltz_w)(void *vd, void *vj, uint32_t desc)
{
    uint16_t temp = 0;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    temp = do_vmskltz_w(Vj->D(0));
    temp |= (do_vmskltz_w(Vj->D(1)) << 2);
    Vd->D(0) = temp;
    Vd->D(1) = 0;
}

static uint64_t do_vmskltz_d(int64_t val)
{
    return (uint64_t)val >> 63;
}

void HELPER(vmskltz_d)(void *vd, void *vj, uint32_t desc)
{
    uint16_t temp = 0;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    temp = do_vmskltz_d(Vj->D(0));
    temp |= (do_vmskltz_d(Vj->D(1)) << 1);
    Vd->D(0) = temp;
    Vd->D(1) = 0;
}

void HELPER(vmskgez_b)(void *vd, void *vj, uint32_t desc)
{
    uint16_t temp = 0;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    temp = do_vmskltz_b(Vj->D(0));
    temp |= (do_vmskltz_b(Vj->D(1)) << 8);
    Vd->D(0) = (uint16_t)(~temp);
    Vd->D(1) = 0;
}

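/*
 * Per-byte zero detect: ((a & 0x7f..7f) + 0x7f..7f) sets bit 7 of each
 * byte iff the byte's low seven bits are non-zero; OR-ing in a itself
 * covers bit 7.  After the invert, a byte's MSB survives only when the
 * whole byte was zero, and the fold produces one "is zero" bit per
 * byte; vmsknz_b then inverts that to get the non-zero mask.
 */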
static uint64_t do_vmskez_b(uint64_t a)
{
    uint64_t m = 0x7f7f7f7f7f7f7f7fULL;
    uint64_t c = ~(((a & m) + m) | a | m);
    c |= c << 7;
    c |= c << 14;
    c |= c << 28;
    return c >> 56;
}

void HELPER(vmsknz_b)(void *vd, void *vj, uint32_t desc)
{
    uint16_t temp = 0;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    temp = do_vmskez_b(Vj->D(0));
    temp |= (do_vmskez_b(Vj->D(1)) << 8);
    Vd->D(0) = (uint16_t)(~temp);
    Vd->D(1) = 0;
}

void HELPER(vnori_b)(void *vd, void *vj, uint64_t imm, uint32_t v)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    for (i = 0; i < LSX_LEN / 8; i++) {
        Vd->B(i) = ~(Vj->B(i) | (uint8_t)imm);
    }
}

#define VSLLWIL(NAME, BIT, E1, E2)                                 \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
{                                                                  \
    int i;                                                         \
    VReg temp;                                                     \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
    typedef __typeof(temp.E1(0)) TD;                               \
                                                                   \
    temp.D(0) = 0;                                                 \
    temp.D(1) = 0;                                                 \
    for (i = 0; i < LSX_LEN / BIT; i++) {                          \
        temp.E1(i) = (TD)Vj->E2(i) << (imm % BIT);                 \
    }                                                              \
    *Vd = temp;                                                    \
}

void HELPER(vextl_q_d)(void *vd, void *vj, uint32_t desc)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    Vd->Q(0) = int128_makes64(Vj->D(0));
}

void HELPER(vextl_qu_du)(void *vd, void *vj, uint32_t desc)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    Vd->Q(0) = int128_make64(Vj->D(0));
}

VSLLWIL(vsllwil_h_b, 16, H, B)
VSLLWIL(vsllwil_w_h, 32, W, H)
VSLLWIL(vsllwil_d_w, 64, D, W)
VSLLWIL(vsllwil_hu_bu, 16, UH, UB)
VSLLWIL(vsllwil_wu_hu, 32, UW, UH)
VSLLWIL(vsllwil_du_wu, 64, UD, UW)

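/*
 * Rounding right shifts: shift by sh and add back the last bit shifted
 * out, i.e. round the discarded fraction to nearest, ties up.
 */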
#define do_vsrlr(E, T)                                \
static T do_vsrlr_ ##E(T s1, int sh)                  \
{                                                     \
    if (sh == 0) {                                    \
        return s1;                                    \
    } else {                                          \
        return (s1 >> sh) + ((s1 >> (sh - 1)) & 0x1); \
    }                                                 \
}

do_vsrlr(B, uint8_t)
do_vsrlr(H, uint16_t)
do_vsrlr(W, uint32_t)
do_vsrlr(D, uint64_t)

#define VSRLR(NAME, BIT, T, E)                                    \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)    \
{                                                                 \
    int i;                                                        \
    VReg *Vd = (VReg *)vd;                                        \
    VReg *Vj = (VReg *)vj;                                        \
    VReg *Vk = (VReg *)vk;                                        \
                                                                  \
    for (i = 0; i < LSX_LEN / BIT; i++) {                         \
        Vd->E(i) = do_vsrlr_ ## E(Vj->E(i), ((T)Vk->E(i)) % BIT); \
    }                                                             \
}

VSRLR(vsrlr_b, 8, uint8_t, B)
VSRLR(vsrlr_h, 16, uint16_t, H)
VSRLR(vsrlr_w, 32, uint32_t, W)
VSRLR(vsrlr_d, 64, uint64_t, D)

#define VSRLRI(NAME, BIT, E)                                       \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
{                                                                  \
    int i;                                                         \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
                                                                   \
    for (i = 0; i < LSX_LEN / BIT; i++) {                          \
        Vd->E(i) = do_vsrlr_ ## E(Vj->E(i), imm);                  \
    }                                                              \
}

VSRLRI(vsrlri_b, 8, B)
VSRLRI(vsrlri_h, 16, H)
VSRLRI(vsrlri_w, 32, W)
VSRLRI(vsrlri_d, 64, D)

#define do_vsrar(E, T)                                \
static T do_vsrar_ ##E(T s1, int sh)                  \
{                                                     \
    if (sh == 0) {                                    \
        return s1;                                    \
    } else {                                          \
        return (s1 >> sh) + ((s1 >> (sh - 1)) & 0x1); \
    }                                                 \
}

do_vsrar(B, int8_t)
do_vsrar(H, int16_t)
do_vsrar(W, int32_t)
do_vsrar(D, int64_t)

#define VSRAR(NAME, BIT, T, E)                                    \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)    \
{                                                                 \
    int i;                                                        \
    VReg *Vd = (VReg *)vd;                                        \
    VReg *Vj = (VReg *)vj;                                        \
    VReg *Vk = (VReg *)vk;                                        \
                                                                  \
    for (i = 0; i < LSX_LEN / BIT; i++) {                         \
        Vd->E(i) = do_vsrar_ ## E(Vj->E(i), ((T)Vk->E(i)) % BIT); \
    }                                                             \
}

VSRAR(vsrar_b, 8, uint8_t, B)
VSRAR(vsrar_h, 16, uint16_t, H)
VSRAR(vsrar_w, 32, uint32_t, W)
VSRAR(vsrar_d, 64, uint64_t, D)

#define VSRARI(NAME, BIT, E)                                       \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
{                                                                  \
    int i;                                                         \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
                                                                   \
    for (i = 0; i < LSX_LEN / BIT; i++) {                          \
        Vd->E(i) = do_vsrar_ ## E(Vj->E(i), imm);                  \
    }                                                              \
}

VSRARI(vsrari_b, 8, B)
VSRARI(vsrari_h, 16, H)
VSRARI(vsrari_w, 32, W)
VSRARI(vsrari_d, 64, D)

#define R_SHIFT(a, b) (a >> b)

#define VSRLN(NAME, BIT, T, E1, E2)                              \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)   \
{                                                                \
    int i;                                                       \
    VReg *Vd = (VReg *)vd;                                       \
    VReg *Vj = (VReg *)vj;                                       \
    VReg *Vk = (VReg *)vk;                                       \
                                                                 \
    for (i = 0; i < LSX_LEN / BIT; i++) {                        \
        Vd->E1(i) = R_SHIFT((T)Vj->E2(i), ((T)Vk->E2(i)) % BIT); \
    }                                                            \
    Vd->D(1) = 0;                                                \
}

VSRLN(vsrln_b_h, 16, uint16_t, B, H)
VSRLN(vsrln_h_w, 32, uint32_t, H, W)
VSRLN(vsrln_w_d, 64, uint64_t, W, D)

#define VSRAN(NAME, BIT, T, E1, E2)                            \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
                                                               \
    for (i = 0; i < LSX_LEN / BIT; i++) {                      \
        Vd->E1(i) = R_SHIFT(Vj->E2(i), ((T)Vk->E2(i)) % BIT);  \
    }                                                          \
    Vd->D(1) = 0;                                              \
}

VSRAN(vsran_b_h, 16, uint16_t, B, H)
VSRAN(vsran_h_w, 32, uint32_t, H, W)
VSRAN(vsran_w_d, 64, uint64_t, W, D)

#define VSRLNI(NAME, BIT, T, E1, E2)                               \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
{                                                                  \
    int i, max;                                                    \
    VReg temp;                                                     \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
                                                                   \
    temp.D(0) = 0;                                                 \
    temp.D(1) = 0;                                                 \
    max = LSX_LEN / BIT;                                           \
    for (i = 0; i < max; i++) {                                    \
        temp.E1(i) = R_SHIFT((T)Vj->E2(i), imm);                   \
        temp.E1(i + max) = R_SHIFT((T)Vd->E2(i), imm);             \
    }                                                              \
    *Vd = temp;                                                    \
}

void HELPER(vsrlni_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    VReg temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    temp.D(0) = 0;
    temp.D(1) = 0;
    temp.D(0) = int128_getlo(int128_urshift(Vj->Q(0), imm % 128));
    temp.D(1) = int128_getlo(int128_urshift(Vd->Q(0), imm % 128));
    *Vd = temp;
}

VSRLNI(vsrlni_b_h, 16, uint16_t, B, H)
VSRLNI(vsrlni_h_w, 32, uint32_t, H, W)
VSRLNI(vsrlni_w_d, 64, uint64_t, W, D)

#define VSRANI(NAME, BIT, E1, E2)                                  \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
{                                                                  \
    int i, max;                                                    \
    VReg temp;                                                     \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
                                                                   \
    temp.D(0) = 0;                                                 \
    temp.D(1) = 0;                                                 \
    max = LSX_LEN / BIT;                                           \
    for (i = 0; i < max; i++) {                                    \
        temp.E1(i) = R_SHIFT(Vj->E2(i), imm);                      \
        temp.E1(i + max) = R_SHIFT(Vd->E2(i), imm);                \
    }                                                              \
    *Vd = temp;                                                    \
}

void HELPER(vsrani_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    VReg temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    temp.D(0) = 0;
    temp.D(1) = 0;
    temp.D(0) = int128_getlo(int128_rshift(Vj->Q(0), imm % 128));
    temp.D(1) = int128_getlo(int128_rshift(Vd->Q(0), imm % 128));
    *Vd = temp;
}

VSRANI(vsrani_b_h, 16, B, H)
VSRANI(vsrani_h_w, 32, H, W)
VSRANI(vsrani_w_d, 64, W, D)

#define VSRLRN(NAME, BIT, T, E1, E2)                                  \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)        \
{                                                                     \
    int i;                                                            \
    VReg *Vd = (VReg *)vd;                                            \
    VReg *Vj = (VReg *)vj;                                            \
    VReg *Vk = (VReg *)vk;                                            \
                                                                      \
    for (i = 0; i < LSX_LEN / BIT; i++) {                             \
        Vd->E1(i) = do_vsrlr_ ## E2(Vj->E2(i), ((T)Vk->E2(i)) % BIT); \
    }                                                                 \
    Vd->D(1) = 0;                                                     \
}

VSRLRN(vsrlrn_b_h, 16, uint16_t, B, H)
VSRLRN(vsrlrn_h_w, 32, uint32_t, H, W)
VSRLRN(vsrlrn_w_d, 64, uint64_t, W, D)

#define VSRARN(NAME, BIT, T, E1, E2)                                  \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)        \
{                                                                     \
    int i;                                                            \
    VReg *Vd = (VReg *)vd;                                            \
    VReg *Vj = (VReg *)vj;                                            \
    VReg *Vk = (VReg *)vk;                                            \
                                                                      \
    for (i = 0; i < LSX_LEN / BIT; i++) {                             \
        Vd->E1(i) = do_vsrar_ ## E2(Vj->E2(i), ((T)Vk->E2(i)) % BIT); \
    }                                                                 \
    Vd->D(1) = 0;                                                     \
}

VSRARN(vsrarn_b_h, 16, uint8_t, B, H)
VSRARN(vsrarn_h_w, 32, uint16_t, H, W)
VSRARN(vsrarn_w_d, 64, uint32_t, W, D)

#define VSRLRNI(NAME, BIT, E1, E2)                                 \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
{                                                                  \
    int i, max;                                                    \
    VReg temp;                                                     \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
                                                                   \
    temp.D(0) = 0;                                                 \
    temp.D(1) = 0;                                                 \
    max = LSX_LEN / BIT;                                           \
    for (i = 0; i < max; i++) {                                    \
        temp.E1(i) = do_vsrlr_ ## E2(Vj->E2(i), imm);              \
        temp.E1(i + max) = do_vsrlr_ ## E2(Vd->E2(i), imm);        \
    }                                                              \
    *Vd = temp;                                                    \
}

void HELPER(vsrlrni_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    VReg temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    Int128 r1, r2;

    if (imm == 0) {
        temp.D(0) = int128_getlo(Vj->Q(0));
        temp.D(1) = int128_getlo(Vd->Q(0));
    } else {
        r1 = int128_and(int128_urshift(Vj->Q(0), (imm - 1)), int128_one());
        r2 = int128_and(int128_urshift(Vd->Q(0), (imm - 1)), int128_one());

        temp.D(0) = int128_getlo(int128_add(int128_urshift(Vj->Q(0), imm), r1));
        temp.D(1) = int128_getlo(int128_add(int128_urshift(Vd->Q(0), imm), r2));
    }
    *Vd = temp;
}

VSRLRNI(vsrlrni_b_h, 16, B, H)
VSRLRNI(vsrlrni_h_w, 32, H, W)
VSRLRNI(vsrlrni_w_d, 64, W, D)

#define VSRARNI(NAME, BIT, E1, E2)                                 \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
{                                                                  \
    int i, max;                                                    \
    VReg temp;                                                     \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
                                                                   \
    temp.D(0) = 0;                                                 \
    temp.D(1) = 0;                                                 \
    max = LSX_LEN / BIT;                                           \
    for (i = 0; i < max; i++) {                                    \
        temp.E1(i) = do_vsrar_ ## E2(Vj->E2(i), imm);              \
        temp.E1(i + max) = do_vsrar_ ## E2(Vd->E2(i), imm);        \
    }                                                              \
    *Vd = temp;                                                    \
}

void HELPER(vsrarni_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    VReg temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    Int128 r1, r2;

    if (imm == 0) {
        temp.D(0) = int128_getlo(Vj->Q(0));
        temp.D(1) = int128_getlo(Vd->Q(0));
    } else {
        r1 = int128_and(int128_rshift(Vj->Q(0), (imm - 1)), int128_one());
        r2 = int128_and(int128_rshift(Vd->Q(0), (imm - 1)), int128_one());

        temp.D(0) = int128_getlo(int128_add(int128_rshift(Vj->Q(0), imm), r1));
        temp.D(1) = int128_getlo(int128_add(int128_rshift(Vd->Q(0), imm), r2));
    }
    *Vd = temp;
}

VSRARNI(vsrarni_b_h, 16, B, H)
VSRARNI(vsrarni_h_w, 32, H, W)
VSRARNI(vsrarni_w_d, 64, W, D)

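/*
 * do_ssrlns_* and friends: narrowing shifts with saturation.  The value
 * is shifted right by sa, then clamped against 2^sh - 1, where callers
 * pass sh as BIT/2 - 1 (signed destination) or BIT/2 (unsigned).
 */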
#define SSRLNS(NAME, T1, T2, T3)                    \
static T1 do_ssrlns_ ## NAME(T2 e2, int sa, int sh) \
{                                                   \
    T1 shft_res;                                    \
    if (sa == 0) {                                  \
        shft_res = e2;                              \
    } else {                                        \
        shft_res = (((T1)e2) >> sa);                \
    }                                               \
    T3 mask;                                        \
    mask = (1ull << sh) - 1;                        \
    if (shft_res > mask) {                          \
        return mask;                                \
    } else {                                        \
        return shft_res;                            \
    }                                               \
}

SSRLNS(B, uint16_t, int16_t, uint8_t)
SSRLNS(H, uint32_t, int32_t, uint16_t)
SSRLNS(W, uint64_t, int64_t, uint32_t)

#define VSSRLN(NAME, BIT, T, E1, E2)                                              \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)                    \
{                                                                                 \
    int i;                                                                        \
    VReg *Vd = (VReg *)vd;                                                        \
    VReg *Vj = (VReg *)vj;                                                        \
    VReg *Vk = (VReg *)vk;                                                        \
                                                                                  \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                         \
        Vd->E1(i) = do_ssrlns_ ## E1(Vj->E2(i), (T)Vk->E2(i) % BIT, BIT / 2 - 1); \
    }                                                                             \
    Vd->D(1) = 0;                                                                 \
}

VSSRLN(vssrln_b_h, 16, uint16_t, B, H)
VSSRLN(vssrln_h_w, 32, uint32_t, H, W)
VSSRLN(vssrln_w_d, 64, uint64_t, W, D)

#define SSRANS(E, T1, T2)                        \
static T1 do_ssrans_ ## E(T1 e2, int sa, int sh) \
{                                                \
    T1 shft_res;                                 \
    if (sa == 0) {                               \
        shft_res = e2;                           \
    } else {                                     \
        shft_res = e2 >> sa;                     \
    }                                            \
    T2 mask;                                     \
    mask = (1ll << sh) - 1;                      \
    if (shft_res > mask) {                       \
        return mask;                             \
    } else if (shft_res < -(mask + 1)) {         \
        return ~mask;                            \
    } else {                                     \
        return shft_res;                         \
    }                                            \
}

SSRANS(B, int16_t, int8_t)
SSRANS(H, int32_t, int16_t)
SSRANS(W, int64_t, int32_t)

#define VSSRAN(NAME, BIT, T, E1, E2)                                              \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)                    \
{                                                                                 \
    int i;                                                                        \
    VReg *Vd = (VReg *)vd;                                                        \
    VReg *Vj = (VReg *)vj;                                                        \
    VReg *Vk = (VReg *)vk;                                                        \
                                                                                  \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                         \
        Vd->E1(i) = do_ssrans_ ## E1(Vj->E2(i), (T)Vk->E2(i) % BIT, BIT / 2 - 1); \
    }                                                                             \
    Vd->D(1) = 0;                                                                 \
}

VSSRAN(vssran_b_h, 16, uint16_t, B, H)
VSSRAN(vssran_h_w, 32, uint32_t, H, W)
VSSRAN(vssran_w_d, 64, uint64_t, W, D)

#define SSRLNU(E, T1, T2, T3)                    \
static T1 do_ssrlnu_ ## E(T3 e2, int sa, int sh) \
{                                                \
    T1 shft_res;                                 \
    if (sa == 0) {                               \
        shft_res = e2;                           \
    } else {                                     \
        shft_res = (((T1)e2) >> sa);             \
    }                                            \
    T2 mask;                                     \
    mask = (1ull << sh) - 1;                     \
    if (shft_res > mask) {                       \
        return mask;                             \
    } else {                                     \
        return shft_res;                         \
    }                                            \
}

SSRLNU(B, uint16_t, uint8_t, int16_t)
SSRLNU(H, uint32_t, uint16_t, int32_t)
SSRLNU(W, uint64_t, uint32_t, int64_t)

#define VSSRLNU(NAME, BIT, T, E1, E2)                                         \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)                \
{                                                                             \
    int i;                                                                    \
    VReg *Vd = (VReg *)vd;                                                    \
    VReg *Vj = (VReg *)vj;                                                    \
    VReg *Vk = (VReg *)vk;                                                    \
                                                                              \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                     \
        Vd->E1(i) = do_ssrlnu_ ## E1(Vj->E2(i), (T)Vk->E2(i) % BIT, BIT / 2); \
    }                                                                         \
    Vd->D(1) = 0;                                                             \
}

VSSRLNU(vssrln_bu_h, 16, uint16_t, B, H)
VSSRLNU(vssrln_hu_w, 32, uint32_t, H, W)
VSSRLNU(vssrln_wu_d, 64, uint64_t, W, D)

#define SSRANU(E, T1, T2, T3)                    \
static T1 do_ssranu_ ## E(T3 e2, int sa, int sh) \
{                                                \
    T1 shft_res;                                 \
    if (sa == 0) {                               \
        shft_res = e2;                           \
    } else {                                     \
        shft_res = e2 >> sa;                     \
    }                                            \
    if (e2 < 0) {                                \
        shft_res = 0;                            \
    }                                            \
    T2 mask;                                     \
    mask = (1ull << sh) - 1;                     \
    if (shft_res > mask) {                       \
        return mask;                             \
    } else {                                     \
        return shft_res;                         \
    }                                            \
}

SSRANU(B, uint16_t, uint8_t, int16_t)
SSRANU(H, uint32_t, uint16_t, int32_t)
SSRANU(W, uint64_t, uint32_t, int64_t)

#define VSSRANU(NAME, BIT, T, E1, E2)                                         \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)                \
{                                                                             \
    int i;                                                                    \
    VReg *Vd = (VReg *)vd;                                                    \
    VReg *Vj = (VReg *)vj;                                                    \
    VReg *Vk = (VReg *)vk;                                                    \
                                                                              \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                     \
        Vd->E1(i) = do_ssranu_ ## E1(Vj->E2(i), (T)Vk->E2(i) % BIT, BIT / 2); \
    }                                                                         \
    Vd->D(1) = 0;                                                             \
}

VSSRANU(vssran_bu_h, 16, uint16_t, B, H)
VSSRANU(vssran_hu_w, 32, uint32_t, H, W)
VSSRANU(vssran_wu_d, 64, uint64_t, W, D)

#define VSSRLNI(NAME, BIT, E1, E2)                                                  \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)                  \
{                                                                                   \
    int i;                                                                          \
    VReg temp;                                                                      \
    VReg *Vd = (VReg *)vd;                                                          \
    VReg *Vj = (VReg *)vj;                                                          \
                                                                                    \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                           \
        temp.E1(i) = do_ssrlns_ ## E1(Vj->E2(i), imm, BIT / 2 - 1);                 \
        temp.E1(i + LSX_LEN / BIT) = do_ssrlns_ ## E1(Vd->E2(i), imm, BIT / 2 - 1); \
    }                                                                               \
    *Vd = temp;                                                                     \
}

void HELPER(vssrlni_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    Int128 shft_res1, shft_res2, mask;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    if (imm == 0) {
        shft_res1 = Vj->Q(0);
        shft_res2 = Vd->Q(0);
    } else {
        shft_res1 = int128_urshift(Vj->Q(0), imm);
        shft_res2 = int128_urshift(Vd->Q(0), imm);
    }
    mask = int128_sub(int128_lshift(int128_one(), 63), int128_one());

    if (int128_ult(mask, shft_res1)) {
        Vd->D(0) = int128_getlo(mask);
    } else {
        Vd->D(0) = int128_getlo(shft_res1);
    }

    if (int128_ult(mask, shft_res2)) {
        Vd->D(1) = int128_getlo(mask);
    } else {
        Vd->D(1) = int128_getlo(shft_res2);
    }
}

VSSRLNI(vssrlni_b_h, 16, B, H)
VSSRLNI(vssrlni_h_w, 32, H, W)
VSSRLNI(vssrlni_w_d, 64, W, D)

#define VSSRANI(NAME, BIT, E1, E2)                                                  \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)                  \
{                                                                                   \
    int i;                                                                          \
    VReg temp;                                                                      \
    VReg *Vd = (VReg *)vd;                                                          \
    VReg *Vj = (VReg *)vj;                                                          \
                                                                                    \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                           \
        temp.E1(i) = do_ssrans_ ## E1(Vj->E2(i), imm, BIT / 2 - 1);                 \
        temp.E1(i + LSX_LEN / BIT) = do_ssrans_ ## E1(Vd->E2(i), imm, BIT / 2 - 1); \
    }                                                                               \
    *Vd = temp;                                                                     \
}

void HELPER(vssrani_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    Int128 shft_res1, shft_res2, mask, min;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    if (imm == 0) {
        shft_res1 = Vj->Q(0);
        shft_res2 = Vd->Q(0);
    } else {
        shft_res1 = int128_rshift(Vj->Q(0), imm);
        shft_res2 = int128_rshift(Vd->Q(0), imm);
    }
    mask = int128_sub(int128_lshift(int128_one(), 63), int128_one());
    min = int128_lshift(int128_one(), 63);

    if (int128_gt(shft_res1, mask)) {
        Vd->D(0) = int128_getlo(mask);
    } else if (int128_lt(shft_res1, int128_neg(min))) {
        Vd->D(0) = int128_getlo(min);
    } else {
        Vd->D(0) = int128_getlo(shft_res1);
    }

    if (int128_gt(shft_res2, mask)) {
        Vd->D(1) = int128_getlo(mask);
    } else if (int128_lt(shft_res2, int128_neg(min))) {
        Vd->D(1) = int128_getlo(min);
    } else {
        Vd->D(1) = int128_getlo(shft_res2);
    }
}

VSSRANI(vssrani_b_h, 16, B, H)
VSSRANI(vssrani_h_w, 32, H, W)
VSSRANI(vssrani_w_d, 64, W, D)

#define VSSRLNUI(NAME, BIT, E1, E2)                                             \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)              \
{                                                                               \
    int i;                                                                      \
    VReg temp;                                                                  \
    VReg *Vd = (VReg *)vd;                                                      \
    VReg *Vj = (VReg *)vj;                                                      \
                                                                                \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                       \
        temp.E1(i) = do_ssrlnu_ ## E1(Vj->E2(i), imm, BIT / 2);                 \
        temp.E1(i + LSX_LEN / BIT) = do_ssrlnu_ ## E1(Vd->E2(i), imm, BIT / 2); \
    }                                                                           \
    *Vd = temp;                                                                 \
}

void HELPER(vssrlni_du_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    Int128 shft_res1, shft_res2, mask;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    if (imm == 0) {
        shft_res1 = Vj->Q(0);
        shft_res2 = Vd->Q(0);
    } else {
        shft_res1 = int128_urshift(Vj->Q(0), imm);
        shft_res2 = int128_urshift(Vd->Q(0), imm);
    }
    mask = int128_sub(int128_lshift(int128_one(), 64), int128_one());

    if (int128_ult(mask, shft_res1)) {
        Vd->D(0) = int128_getlo(mask);
    } else {
        Vd->D(0) = int128_getlo(shft_res1);
    }

    if (int128_ult(mask, shft_res2)) {
        Vd->D(1) = int128_getlo(mask);
    } else {
        Vd->D(1) = int128_getlo(shft_res2);
    }
}

VSSRLNUI(vssrlni_bu_h, 16, B, H)
VSSRLNUI(vssrlni_hu_w, 32, H, W)
VSSRLNUI(vssrlni_wu_d, 64, W, D)

#define VSSRANUI(NAME, BIT, E1, E2)                                             \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)              \
{                                                                               \
    int i;                                                                      \
    VReg temp;                                                                  \
    VReg *Vd = (VReg *)vd;                                                      \
    VReg *Vj = (VReg *)vj;                                                      \
                                                                                \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                       \
        temp.E1(i) = do_ssranu_ ## E1(Vj->E2(i), imm, BIT / 2);                 \
        temp.E1(i + LSX_LEN / BIT) = do_ssranu_ ## E1(Vd->E2(i), imm, BIT / 2); \
    }                                                                           \
    *Vd = temp;                                                                 \
}

void HELPER(vssrani_du_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    Int128 shft_res1, shft_res2, mask;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    if (imm == 0) {
        shft_res1 = Vj->Q(0);
        shft_res2 = Vd->Q(0);
    } else {
        shft_res1 = int128_rshift(Vj->Q(0), imm);
        shft_res2 = int128_rshift(Vd->Q(0), imm);
    }

    if (int128_lt(Vj->Q(0), int128_zero())) {
        shft_res1 = int128_zero();
    }

    if (int128_lt(Vd->Q(0), int128_zero())) {
        shft_res2 = int128_zero();
    }

    mask = int128_sub(int128_lshift(int128_one(), 64), int128_one());

    if (int128_ult(mask, shft_res1)) {
        Vd->D(0) = int128_getlo(mask);
    } else {
        Vd->D(0) = int128_getlo(shft_res1);
    }

    if (int128_ult(mask, shft_res2)) {
        Vd->D(1) = int128_getlo(mask);
    } else {
        Vd->D(1) = int128_getlo(shft_res2);
    }
}

VSSRANUI(vssrani_bu_h, 16, B, H)
VSSRANUI(vssrani_hu_w, 32, H, W)
VSSRANUI(vssrani_wu_d, 64, W, D)

#define SSRLRNS(E1, E2, T1, T2, T3)                \
static T1 do_ssrlrns_ ## E1(T2 e2, int sa, int sh) \
{                                                  \
    T1 shft_res;                                   \
                                                   \
    shft_res = do_vsrlr_ ## E2(e2, sa);            \
    T1 mask;                                       \
    mask = (1ull << sh) - 1;                       \
    if (shft_res > mask) {                         \
        return mask;                               \
    } else {                                       \
        return shft_res;                           \
    }                                              \
}

SSRLRNS(B, H, uint16_t, int16_t, uint8_t)
SSRLRNS(H, W, uint32_t, int32_t, uint16_t)
SSRLRNS(W, D, uint64_t, int64_t, uint32_t)

#define VSSRLRN(NAME, BIT, T, E1, E2)                                              \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)                     \
{                                                                                  \
    int i;                                                                         \
    VReg *Vd = (VReg *)vd;                                                         \
    VReg *Vj = (VReg *)vj;                                                         \
    VReg *Vk = (VReg *)vk;                                                         \
                                                                                   \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                          \
        Vd->E1(i) = do_ssrlrns_ ## E1(Vj->E2(i), (T)Vk->E2(i) % BIT, BIT / 2 - 1); \
    }                                                                              \
    Vd->D(1) = 0;                                                                  \
}

VSSRLRN(vssrlrn_b_h, 16, uint16_t, B, H)
VSSRLRN(vssrlrn_h_w, 32, uint32_t, H, W)
VSSRLRN(vssrlrn_w_d, 64, uint64_t, W, D)

#define SSRARNS(E1, E2, T1, T2)                    \
static T1 do_ssrarns_ ## E1(T1 e2, int sa, int sh) \
{                                                  \
    T1 shft_res;                                   \
                                                   \
    shft_res = do_vsrar_ ## E2(e2, sa);            \
    T2 mask;                                       \
    mask = (1ll << sh) - 1;                        \
    if (shft_res > mask) {                         \
        return mask;                               \
    } else if (shft_res < -(mask + 1)) {           \
        return ~mask;                              \
    } else {                                       \
        return shft_res;                           \
    }                                              \
}

SSRARNS(B, H, int16_t, int8_t)
SSRARNS(H, W, int32_t, int16_t)
SSRARNS(W, D, int64_t, int32_t)

#define VSSRARN(NAME, BIT, T, E1, E2)                                              \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)                     \
{                                                                                  \
    int i;                                                                         \
    VReg *Vd = (VReg *)vd;                                                         \
    VReg *Vj = (VReg *)vj;                                                         \
    VReg *Vk = (VReg *)vk;                                                         \
                                                                                   \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                          \
        Vd->E1(i) = do_ssrarns_ ## E1(Vj->E2(i), (T)Vk->E2(i) % BIT, BIT / 2 - 1); \
    }                                                                              \
    Vd->D(1) = 0;                                                                  \
}

VSSRARN(vssrarn_b_h, 16, uint16_t, B, H)
VSSRARN(vssrarn_h_w, 32, uint32_t, H, W)
VSSRARN(vssrarn_w_d, 64, uint64_t, W, D)

#define SSRLRNU(E1, E2, T1, T2, T3)                \
static T1 do_ssrlrnu_ ## E1(T3 e2, int sa, int sh) \
{                                                  \
    T1 shft_res;                                   \
                                                   \
    shft_res = do_vsrlr_ ## E2(e2, sa);            \
                                                   \
    T2 mask;                                       \
    mask = (1ull << sh) - 1;                       \
    if (shft_res > mask) {                         \
        return mask;                               \
    } else {                                       \
        return shft_res;                           \
    }                                              \
}

SSRLRNU(B, H, uint16_t, uint8_t, int16_t)
SSRLRNU(H, W, uint32_t, uint16_t, int32_t)
SSRLRNU(W, D, uint64_t, uint32_t, int64_t)

#define VSSRLRNU(NAME, BIT, T, E1, E2)                                         \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)                 \
{                                                                              \
    int i;                                                                     \
    VReg *Vd = (VReg *)vd;                                                     \
    VReg *Vj = (VReg *)vj;                                                     \
    VReg *Vk = (VReg *)vk;                                                     \
                                                                               \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                      \
        Vd->E1(i) = do_ssrlrnu_ ## E1(Vj->E2(i), (T)Vk->E2(i) % BIT, BIT / 2); \
    }                                                                          \
    Vd->D(1) = 0;                                                              \
}

VSSRLRNU(vssrlrn_bu_h, 16, uint16_t, B, H)
VSSRLRNU(vssrlrn_hu_w, 32, uint32_t, H, W)
VSSRLRNU(vssrlrn_wu_d, 64, uint64_t, W, D)

#define SSRARNU(E1, E2, T1, T2, T3)                \
static T1 do_ssrarnu_ ## E1(T3 e2, int sa, int sh) \
{                                                  \
    T1 shft_res;                                   \
                                                   \
    if (e2 < 0) {                                  \
        shft_res = 0;                              \
    } else {                                       \
        shft_res = do_vsrar_ ## E2(e2, sa);        \
    }                                              \
    T2 mask;                                       \
    mask = (1ull << sh) - 1;                       \
    if (shft_res > mask) {                         \
        return mask;                               \
    } else {                                       \
        return shft_res;                           \
    }                                              \
}

SSRARNU(B, H, uint16_t, uint8_t, int16_t)
SSRARNU(H, W, uint32_t, uint16_t, int32_t)
SSRARNU(W, D, uint64_t, uint32_t, int64_t)

#define VSSRARNU(NAME, BIT, T, E1, E2)                                         \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)                 \
{                                                                              \
    int i;                                                                     \
    VReg *Vd = (VReg *)vd;                                                     \
    VReg *Vj = (VReg *)vj;                                                     \
    VReg *Vk = (VReg *)vk;                                                     \
                                                                               \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                      \
        Vd->E1(i) = do_ssrarnu_ ## E1(Vj->E2(i), (T)Vk->E2(i) % BIT, BIT / 2); \
    }                                                                          \
    Vd->D(1) = 0;                                                              \
}

VSSRARNU(vssrarn_bu_h, 16, uint16_t, B, H)
VSSRARNU(vssrarn_hu_w, 32, uint32_t, H, W)
VSSRARNU(vssrarn_wu_d, 64, uint64_t, W, D)

#define VSSRLRNI(NAME, BIT, E1, E2)                                                  \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)                   \
{                                                                                    \
    int i;                                                                           \
    VReg temp;                                                                       \
    VReg *Vd = (VReg *)vd;                                                           \
    VReg *Vj = (VReg *)vj;                                                           \
                                                                                     \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                            \
        temp.E1(i) = do_ssrlrns_ ## E1(Vj->E2(i), imm, BIT / 2 - 1);                 \
        temp.E1(i + LSX_LEN / BIT) = do_ssrlrns_ ## E1(Vd->E2(i), imm, BIT / 2 - 1); \
    }                                                                                \
    *Vd = temp;                                                                      \
}

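/*
 * VSSRLRNI_Q is shared by vssrlrni_d_q (sh = 63) and vssrlrni_du_q
 * (sh = 64): a rounding logical right shift of both 128-bit sources,
 * saturated against 2^sh - 1 and narrowed into the two halves of Vd.
 */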
1830 #define VSSRLRNI_Q(NAME, sh) \
1831 void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
1832 { \
1833 Int128 shft_res1, shft_res2, mask, r1, r2; \
1834 VReg *Vd = (VReg *)vd; \
1835 VReg *Vj = (VReg *)vj; \
1836 \
1837 if (imm == 0) { \
1838 shft_res1 = Vj->Q(0); \
1839 shft_res2 = Vd->Q(0); \
1840 } else { \
1841 r1 = int128_and(int128_urshift(Vj->Q(0), (imm -1)), int128_one()); \
1842 r2 = int128_and(int128_urshift(Vd->Q(0), (imm -1)), int128_one()); \
1843 \
1844 shft_res1 = (int128_add(int128_urshift(Vj->Q(0), imm), r1)); \
1845 shft_res2 = (int128_add(int128_urshift(Vd->Q(0), imm), r2)); \
1846 } \
1847 \
1848 mask = int128_sub(int128_lshift(int128_one(), sh), int128_one()); \
1849 \
1850 if (int128_ult(mask, shft_res1)) { \
1851 Vd->D(0) = int128_getlo(mask); \
1852 }else { \
1853 Vd->D(0) = int128_getlo(shft_res1); \
1854 } \
1855 \
1856 if (int128_ult(mask, shft_res2)) { \
1857 Vd->D(1) = int128_getlo(mask); \
1858 }else { \
1859 Vd->D(1) = int128_getlo(shft_res2); \
1860 } \
1861 }
1862
1863 VSSRLRNI(vssrlrni_b_h, 16, B, H)
1864 VSSRLRNI(vssrlrni_h_w, 32, H, W)
1865 VSSRLRNI(vssrlrni_w_d, 64, W, D)
1866 VSSRLRNI_Q(vssrlrni_d_q, 63)
1867
1868 #define VSSRARNI(NAME, BIT, E1, E2) \
1869 void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
1870 { \
1871 int i; \
1872 VReg temp; \
1873 VReg *Vd = (VReg *)vd; \
1874 VReg *Vj = (VReg *)vj; \
1875 \
1876 for (i = 0; i < LSX_LEN/BIT; i++) { \
1877 temp.E1(i) = do_ssrarns_ ## E1(Vj->E2(i), imm, BIT/2 -1); \
1878 temp.E1(i + LSX_LEN/BIT) = do_ssrarns_ ## E1(Vd->E2(i), imm, BIT/2 -1); \
1879 } \
1880 *Vd = temp; \
1881 }
1882
1883 void HELPER(vssrarni_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
1884 {
1885 Int128 shft_res1, shft_res2, mask1, mask2, r1, r2;
1886 VReg *Vd = (VReg *)vd;
1887 VReg *Vj = (VReg *)vj;
1888
1889 if (imm == 0) {
1890 shft_res1 = Vj->Q(0);
1891 shft_res2 = Vd->Q(0);
1892 } else {
1893 r1 = int128_and(int128_rshift(Vj->Q(0), (imm -1)), int128_one());
1894 r2 = int128_and(int128_rshift(Vd->Q(0), (imm -1)), int128_one());
1895
1896 shft_res1 = int128_add(int128_rshift(Vj->Q(0), imm), r1);
1897 shft_res2 = int128_add(int128_rshift(Vd->Q(0), imm), r2);
1898 }
1899
1900 mask1 = int128_sub(int128_lshift(int128_one(), 63), int128_one());
1901 mask2 = int128_lshift(int128_one(), 63);
1902
1903 if (int128_gt(shft_res1, mask1)) {
1904 Vd->D(0) = int128_getlo(mask1);
1905 } else if (int128_lt(shft_res1, int128_neg(mask2))) {
1906 Vd->D(0) = int128_getlo(mask2);
1907 } else {
1908 Vd->D(0) = int128_getlo(shft_res1);
1909 }
1910
1911 if (int128_gt(shft_res2, mask1)) {
1912 Vd->D(1) = int128_getlo(mask1);
1913 } else if (int128_lt(shft_res2, int128_neg(mask2))) {
1914 Vd->D(1) = int128_getlo(mask2);
1915 } else {
1916 Vd->D(1) = int128_getlo(shft_res2);
1917 }
1918 }
1919
1920 VSSRARNI(vssrarni_b_h, 16, B, H)
1921 VSSRARNI(vssrarni_h_w, 32, H, W)
1922 VSSRARNI(vssrarni_w_d, 64, W, D)
1923
1924 #define VSSRLRNUI(NAME, BIT, E1, E2) \
1925 void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
1926 { \
1927 int i; \
1928 VReg temp; \
1929 VReg *Vd = (VReg *)vd; \
1930 VReg *Vj = (VReg *)vj; \
1931 \
1932 for (i = 0; i < LSX_LEN/BIT; i++) { \
1933 temp.E1(i) = do_ssrlrnu_ ## E1(Vj->E2(i), imm, BIT/2); \
1934 temp.E1(i + LSX_LEN/BIT) = do_ssrlrnu_ ## E1(Vd->E2(i), imm, BIT/2); \
1935 } \
1936 *Vd = temp; \
1937 }
1938
1939 VSSRLRNUI(vssrlrni_bu_h, 16, B, H)
1940 VSSRLRNUI(vssrlrni_hu_w, 32, H, W)
1941 VSSRLRNUI(vssrlrni_wu_d, 64, W, D)
1942 VSSRLRNI_Q(vssrlrni_du_q, 64)
1943
1944 #define VSSRARNUI(NAME, BIT, E1, E2) \
1945 void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
1946 { \
1947 int i; \
1948 VReg temp; \
1949 VReg *Vd = (VReg *)vd; \
1950 VReg *Vj = (VReg *)vj; \
1951 \
1952 for (i = 0; i < LSX_LEN/BIT; i++) { \
1953 temp.E1(i) = do_ssrarnu_ ## E1(Vj->E2(i), imm, BIT/2); \
1954 temp.E1(i + LSX_LEN/BIT) = do_ssrarnu_ ## E1(Vd->E2(i), imm, BIT/2); \
1955 } \
1956 *Vd = temp; \
1957 }
1958
1959 void HELPER(vssrarni_du_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
1960 {
1961 Int128 shft_res1, shft_res2, mask1, mask2, r1, r2;
1962 VReg *Vd = (VReg *)vd;
1963 VReg *Vj = (VReg *)vj;
1964
1965 if (imm == 0) {
1966 shft_res1 = Vj->Q(0);
1967 shft_res2 = Vd->Q(0);
1968 } else {
1969 r1 = int128_and(int128_rshift(Vj->Q(0), (imm -1)), int128_one());
1970 r2 = int128_and(int128_rshift(Vd->Q(0), (imm -1)), int128_one());
1971
1972 shft_res1 = int128_add(int128_rshift(Vj->Q(0), imm), r1);
1973 shft_res2 = int128_add(int128_rshift(Vd->Q(0), imm), r2);
1974 }
1975
1976 if (int128_lt(Vj->Q(0), int128_zero())) {
1977 shft_res1 = int128_zero();
1978 }
1979 if (int128_lt(Vd->Q(0), int128_zero())) {
1980 shft_res2 = int128_zero();
1981 }
1982
1983 mask1 = int128_sub(int128_lshift(int128_one(), 64), int128_one());
1984 mask2 = int128_lshift(int128_one(), 64);
1985
1986 if (int128_gt(shft_res1, mask1)) {
1987 Vd->D(0) = int128_getlo(mask1);
1988 } else if (int128_lt(shft_res1, int128_neg(mask2))) {
1989 Vd->D(0) = int128_getlo(mask2);
1990 } else {
1991 Vd->D(0) = int128_getlo(shft_res1);
1992 }
1993
1994 if (int128_gt(shft_res2, mask1)) {
1995 Vd->D(1) = int128_getlo(mask1);
1996 } else if (int128_lt(shft_res2, int128_neg(mask2))) {
1997 Vd->D(1) = int128_getlo(mask2);
1998 } else {
1999 Vd->D(1) = int128_getlo(shft_res2);
2000 }
2001 }
2002
2003 VSSRARNUI(vssrarni_bu_h, 16, B, H)
2004 VSSRARNUI(vssrarni_hu_w, 32, H, W)
2005 VSSRARNUI(vssrarni_wu_d, 64, W, D)
2006
2007 #define DO_2OP(NAME, BIT, E, DO_OP) \
2008 void HELPER(NAME)(void *vd, void *vj, uint32_t desc) \
2009 { \
2010 int i; \
2011 VReg *Vd = (VReg *)vd; \
2012 VReg *Vj = (VReg *)vj; \
2013 \
2014 for (i = 0; i < LSX_LEN/BIT; i++) \
2015 { \
2016 Vd->E(i) = DO_OP(Vj->E(i)); \
2017 } \
2018 }
2019
2020 #define DO_CLO_B(N) (clz32(~N & 0xff) - 24)
2021 #define DO_CLO_H(N) (clz32(~N & 0xffff) - 16)
2022 #define DO_CLO_W(N) (clz32(~N))
2023 #define DO_CLO_D(N) (clz64(~N))
2024 #define DO_CLZ_B(N) (clz32(N) - 24)
2025 #define DO_CLZ_H(N) (clz32(N) - 16)
2026 #define DO_CLZ_W(N) (clz32(N))
2027 #define DO_CLZ_D(N) (clz64(N))
2028
2029 DO_2OP(vclo_b, 8, UB, DO_CLO_B)
2030 DO_2OP(vclo_h, 16, UH, DO_CLO_H)
2031 DO_2OP(vclo_w, 32, UW, DO_CLO_W)
2032 DO_2OP(vclo_d, 64, UD, DO_CLO_D)
2033 DO_2OP(vclz_b, 8, UB, DO_CLZ_B)
2034 DO_2OP(vclz_h, 16, UH, DO_CLZ_H)
2035 DO_2OP(vclz_w, 32, UW, DO_CLZ_W)
2036 DO_2OP(vclz_d, 64, UD, DO_CLZ_D)
2037
2038 #define VPCNT(NAME, BIT, E, FN) \
2039 void HELPER(NAME)(void *vd, void *vj, uint32_t desc) \
2040 { \
2041 int i; \
2042 VReg *Vd = (VReg *)vd; \
2043 VReg *Vj = (VReg *)vj; \
2044 \
2045 for (i = 0; i < LSX_LEN/BIT; i++) \
2046 { \
2047 Vd->E(i) = FN(Vj->E(i)); \
2048 } \
2049 }
2050
2051 VPCNT(vpcnt_b, 8, UB, ctpop8)
2052 VPCNT(vpcnt_h, 16, UH, ctpop16)
2053 VPCNT(vpcnt_w, 32, UW, ctpop32)
2054 VPCNT(vpcnt_d, 64, UD, ctpop64)
2055
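/*
 * Single-bit clear/set/flip primitives.  The register form (DO_BIT)
 * reduces the bit index modulo the element width; the immediate form
 * (DO_BITI) uses imm directly, its range being guaranteed by the
 * instruction encoding.
 */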
2056 #define DO_BITCLR(a, bit) (a & ~(1ull << bit))
2057 #define DO_BITSET(a, bit) (a | 1ull << bit)
2058 #define DO_BITREV(a, bit) (a ^ (1ull << bit))
2059
2060 #define DO_BIT(NAME, BIT, E, DO_OP) \
2061 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
2062 { \
2063 int i; \
2064 VReg *Vd = (VReg *)vd; \
2065 VReg *Vj = (VReg *)vj; \
2066 VReg *Vk = (VReg *)vk; \
2067 \
2068 for (i = 0; i < LSX_LEN/BIT; i++) { \
2069 Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i) % BIT); \
2070 } \
2071 }
2072
2073 DO_BIT(vbitclr_b, 8, UB, DO_BITCLR)
2074 DO_BIT(vbitclr_h, 16, UH, DO_BITCLR)
2075 DO_BIT(vbitclr_w, 32, UW, DO_BITCLR)
2076 DO_BIT(vbitclr_d, 64, UD, DO_BITCLR)
2077 DO_BIT(vbitset_b, 8, UB, DO_BITSET)
2078 DO_BIT(vbitset_h, 16, UH, DO_BITSET)
2079 DO_BIT(vbitset_w, 32, UW, DO_BITSET)
2080 DO_BIT(vbitset_d, 64, UD, DO_BITSET)
2081 DO_BIT(vbitrev_b, 8, UB, DO_BITREV)
2082 DO_BIT(vbitrev_h, 16, UH, DO_BITREV)
2083 DO_BIT(vbitrev_w, 32, UW, DO_BITREV)
2084 DO_BIT(vbitrev_d, 64, UD, DO_BITREV)
2085
2086 #define DO_BITI(NAME, BIT, E, DO_OP) \
2087 void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
2088 { \
2089 int i; \
2090 VReg *Vd = (VReg *)vd; \
2091 VReg *Vj = (VReg *)vj; \
2092 \
2093 for (i = 0; i < LSX_LEN/BIT; i++) { \
2094 Vd->E(i) = DO_OP(Vj->E(i), imm); \
2095 } \
2096 }
2097
2098 DO_BITI(vbitclri_b, 8, UB, DO_BITCLR)
2099 DO_BITI(vbitclri_h, 16, UH, DO_BITCLR)
2100 DO_BITI(vbitclri_w, 32, UW, DO_BITCLR)
2101 DO_BITI(vbitclri_d, 64, UD, DO_BITCLR)
2102 DO_BITI(vbitseti_b, 8, UB, DO_BITSET)
2103 DO_BITI(vbitseti_h, 16, UH, DO_BITSET)
2104 DO_BITI(vbitseti_w, 32, UW, DO_BITSET)
2105 DO_BITI(vbitseti_d, 64, UD, DO_BITSET)
2106 DO_BITI(vbitrevi_b, 8, UB, DO_BITREV)
2107 DO_BITI(vbitrevi_h, 16, UH, DO_BITREV)
2108 DO_BITI(vbitrevi_w, 32, UW, DO_BITREV)
2109 DO_BITI(vbitrevi_d, 64, UD, DO_BITREV)
2110
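/*
 * vfrstp: find the index of the first negative (sign-bit-set) element
 * of Vj, or the element count if there is none, and store it into the
 * element of Vd selected by the low bits of Vk->E(0), or by imm in
 * the VFRSTPI form.
 */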
2111 #define VFRSTP(NAME, BIT, MASK, E) \
2112 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
2113 { \
2114 int i, m; \
2115 VReg *Vd = (VReg *)vd; \
2116 VReg *Vj = (VReg *)vj; \
2117 VReg *Vk = (VReg *)vk; \
2118 \
2119 for (i = 0; i < LSX_LEN/BIT; i++) { \
2120 if (Vj->E(i) < 0) { \
2121 break; \
2122 } \
2123 } \
2124 m = Vk->E(0) & MASK; \
2125 Vd->E(m) = i; \
2126 }
2127
2128 VFRSTP(vfrstp_b, 8, 0xf, B)
2129 VFRSTP(vfrstp_h, 16, 0x7, H)
2130
2131 #define VFRSTPI(NAME, BIT, E) \
2132 void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
2133 { \
2134 int i, m; \
2135 VReg *Vd = (VReg *)vd; \
2136 VReg *Vj = (VReg *)vj; \
2137 \
2138 for (i = 0; i < LSX_LEN/BIT; i++) { \
2139 if (Vj->E(i) < 0) { \
2140 break; \
2141 } \
2142 } \
2143 m = imm % (LSX_LEN/BIT); \
2144 Vd->E(m) = i; \
2145 }
2146
2147 VFRSTPI(vfrstpi_b, 8, B)
2148 VFRSTPI(vfrstpi_h, 16, H)
2149
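/*
 * Fold pending softfloat exception flags into FCSR0: flags in 'mask'
 * are discarded, the rest are translated to LoongArch cause bits, and
 * an FPE exception is raised if any surviving flag is enabled in
 * FCSR0; otherwise the flags accumulate in the FCSR0 flag field.
 */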
2150 static void vec_update_fcsr0_mask(CPULoongArchState *env,
2151 uintptr_t pc, int mask)
2152 {
2153 int flags = get_float_exception_flags(&env->fp_status);
2154
2155 set_float_exception_flags(0, &env->fp_status);
2156
2157 flags &= ~mask;
2158
2159 if (flags) {
2160 flags = ieee_ex_to_loongarch(flags);
2161 UPDATE_FP_CAUSE(env->fcsr0, flags);
2162 }
2163
2164 if (GET_FP_ENABLES(env->fcsr0) & flags) {
2165 do_raise_exception(env, EXCCODE_FPE, pc);
2166 } else {
2167 UPDATE_FP_FLAGS(env->fcsr0, flags);
2168 }
2169 }
2170
2171 static void vec_update_fcsr0(CPULoongArchState *env, uintptr_t pc)
2172 {
2173 vec_update_fcsr0_mask(env, pc, 0);
2174 }
2175
2176 static inline void vec_clear_cause(CPULoongArchState *env)
2177 {
2178 SET_FP_CAUSE(env->fcsr0, 0);
2179 }
2180
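/*
 * Element-wise three-operand float helpers.  FCSR0 is updated after
 * every lane so that an enabled exception is raised for the first
 * faulting element.
 */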
2181 #define DO_3OP_F(NAME, BIT, E, FN) \
2182 void HELPER(NAME)(void *vd, void *vj, void *vk, \
2183 CPULoongArchState *env, uint32_t desc) \
2184 { \
2185 int i; \
2186 VReg *Vd = (VReg *)vd; \
2187 VReg *Vj = (VReg *)vj; \
2188 VReg *Vk = (VReg *)vk; \
2189 \
2190 vec_clear_cause(env); \
2191 for (i = 0; i < LSX_LEN/BIT; i++) { \
2192 Vd->E(i) = FN(Vj->E(i), Vk->E(i), &env->fp_status); \
2193 vec_update_fcsr0(env, GETPC()); \
2194 } \
2195 }
2196
2197 DO_3OP_F(vfadd_s, 32, UW, float32_add)
2198 DO_3OP_F(vfadd_d, 64, UD, float64_add)
2199 DO_3OP_F(vfsub_s, 32, UW, float32_sub)
2200 DO_3OP_F(vfsub_d, 64, UD, float64_sub)
2201 DO_3OP_F(vfmul_s, 32, UW, float32_mul)
2202 DO_3OP_F(vfmul_d, 64, UD, float64_mul)
2203 DO_3OP_F(vfdiv_s, 32, UW, float32_div)
2204 DO_3OP_F(vfdiv_d, 64, UD, float64_div)
2205 DO_3OP_F(vfmax_s, 32, UW, float32_maxnum)
2206 DO_3OP_F(vfmax_d, 64, UD, float64_maxnum)
2207 DO_3OP_F(vfmin_s, 32, UW, float32_minnum)
2208 DO_3OP_F(vfmin_d, 64, UD, float64_minnum)
2209 DO_3OP_F(vfmaxa_s, 32, UW, float32_maxnummag)
2210 DO_3OP_F(vfmaxa_d, 64, UD, float64_maxnummag)
2211 DO_3OP_F(vfmina_s, 32, UW, float32_minnummag)
2212 DO_3OP_F(vfmina_d, 64, UD, float64_minnummag)
2213
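/*
 * Fused multiply-add family: the muladd 'flags' argument selects the
 * variant: float_muladd_negate_c negates the addend (vfmsub),
 * float_muladd_negate_result negates the final result (vfnmadd), and
 * both together give vfnmsub.
 */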
2214 #define DO_4OP_F(NAME, BIT, E, FN, flags) \
2215 void HELPER(NAME)(void *vd, void *vj, void *vk, void *va, \
2216 CPULoongArchState *env, uint32_t desc) \
2217 { \
2218 int i; \
2219 VReg *Vd = (VReg *)vd; \
2220 VReg *Vj = (VReg *)vj; \
2221 VReg *Vk = (VReg *)vk; \
2222 VReg *Va = (VReg *)va; \
2223 \
2224 vec_clear_cause(env); \
2225 for (i = 0; i < LSX_LEN/BIT; i++) { \
2226 Vd->E(i) = FN(Vj->E(i), Vk->E(i), Va->E(i), flags, &env->fp_status); \
2227 vec_update_fcsr0(env, GETPC()); \
2228 } \
2229 }
2230
2231 DO_4OP_F(vfmadd_s, 32, UW, float32_muladd, 0)
2232 DO_4OP_F(vfmadd_d, 64, UD, float64_muladd, 0)
2233 DO_4OP_F(vfmsub_s, 32, UW, float32_muladd, float_muladd_negate_c)
2234 DO_4OP_F(vfmsub_d, 64, UD, float64_muladd, float_muladd_negate_c)
2235 DO_4OP_F(vfnmadd_s, 32, UW, float32_muladd, float_muladd_negate_result)
2236 DO_4OP_F(vfnmadd_d, 64, UD, float64_muladd, float_muladd_negate_result)
2237 DO_4OP_F(vfnmsub_s, 32, UW, float32_muladd,
2238 float_muladd_negate_c | float_muladd_negate_result)
2239 DO_4OP_F(vfnmsub_d, 64, UD, float64_muladd,
2240 float_muladd_negate_c | float_muladd_negate_result)
2241
2242 #define DO_2OP_F(NAME, BIT, E, FN) \
2243 void HELPER(NAME)(void *vd, void *vj, \
2244 CPULoongArchState *env, uint32_t desc) \
2245 { \
2246 int i; \
2247 VReg *Vd = (VReg *)vd; \
2248 VReg *Vj = (VReg *)vj; \
2249 \
2250 vec_clear_cause(env); \
2251 for (i = 0; i < LSX_LEN/BIT; i++) { \
2252 Vd->E(i) = FN(env, Vj->E(i)); \
2253 } \
2254 }
2255
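/*
 * do_flogb computes floor(log2(x)): log2 is evaluated and rounded to
 * an integer with the rounding mode forced to round-down, and the
 * inexact flag raised by the truncation is suppressed when updating
 * FCSR0.
 */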
2256 #define FLOGB(BIT, T) \
2257 static T do_flogb_## BIT(CPULoongArchState *env, T fj) \
2258 { \
2259 T fp, fd; \
2260 float_status *status = &env->fp_status; \
2261 FloatRoundMode old_mode = get_float_rounding_mode(status); \
2262 \
2263 set_float_rounding_mode(float_round_down, status); \
2264 fp = float ## BIT ##_log2(fj, status); \
2265 fd = float ## BIT ##_round_to_int(fp, status); \
2266 set_float_rounding_mode(old_mode, status); \
2267 vec_update_fcsr0_mask(env, GETPC(), float_flag_inexact); \
2268 return fd; \
2269 }
2270
2271 FLOGB(32, uint32_t)
2272 FLOGB(64, uint64_t)
2273
2274 #define FCLASS(NAME, BIT, E, FN) \
2275 void HELPER(NAME)(void *vd, void *vj, \
2276 CPULoongArchState *env, uint32_t desc) \
2277 { \
2278 int i; \
2279 VReg *Vd = (VReg *)vd; \
2280 VReg *Vj = (VReg *)vj; \
2281 \
2282 for (i = 0; i < LSX_LEN/BIT; i++) { \
2283 Vd->E(i) = FN(env, Vj->E(i)); \
2284 } \
2285 }
2286
2287 FCLASS(vfclass_s, 32, UW, helper_fclass_s)
2288 FCLASS(vfclass_d, 64, UD, helper_fclass_d)
2289
2290 #define FSQRT(BIT, T) \
2291 static T do_fsqrt_## BIT(CPULoongArchState *env, T fj) \
2292 { \
2293 T fd; \
2294 fd = float ## BIT ##_sqrt(fj, &env->fp_status); \
2295 vec_update_fcsr0(env, GETPC()); \
2296 return fd; \
2297 }
2298
2299 FSQRT(32, uint32_t)
2300 FSQRT(64, uint64_t)
2301
2302 #define FRECIP(BIT, T) \
2303 static T do_frecip_## BIT(CPULoongArchState *env, T fj) \
2304 { \
2305 T fd; \
2306 fd = float ## BIT ##_div(float ## BIT ##_one, fj, &env->fp_status); \
2307 vec_update_fcsr0(env, GETPC()); \
2308 return fd; \
2309 }
2310
2311 FRECIP(32, uint32_t)
2312 FRECIP(64, uint64_t)
2313
2314 #define FRSQRT(BIT, T) \
2315 static T do_frsqrt_## BIT(CPULoongArchState *env, T fj) \
2316 { \
2317 T fd, fp; \
2318 fp = float ## BIT ##_sqrt(fj, &env->fp_status); \
2319 fd = float ## BIT ##_div(float ## BIT ##_one, fp, &env->fp_status); \
2320 vec_update_fcsr0(env, GETPC()); \
2321 return fd; \
2322 }
2323
2324 FRSQRT(32, uint32_t)
2325 FRSQRT(64, uint64_t)
2326
2327 DO_2OP_F(vflogb_s, 32, UW, do_flogb_32)
2328 DO_2OP_F(vflogb_d, 64, UD, do_flogb_64)
2329 DO_2OP_F(vfsqrt_s, 32, UW, do_fsqrt_32)
2330 DO_2OP_F(vfsqrt_d, 64, UD, do_fsqrt_64)
2331 DO_2OP_F(vfrecip_s, 32, UW, do_frecip_32)
2332 DO_2OP_F(vfrecip_d, 64, UD, do_frecip_64)
2333 DO_2OP_F(vfrsqrt_s, 32, UW, do_frsqrt_32)
2334 DO_2OP_F(vfrsqrt_d, 64, UD, do_frsqrt_64)
2335
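/*
 * Thin wrappers that give the widening/narrowing conversions a
 * uniform (value, status) shape; the 'true' argument selects standard
 * IEEE half-precision rather than the alternative encoding.
 */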
2336 static uint32_t float16_cvt_float32(uint16_t h, float_status *status)
2337 {
2338 return float16_to_float32(h, true, status);
2339 }
2340 static uint64_t float32_cvt_float64(uint32_t s, float_status *status)
2341 {
2342 return float32_to_float64(s, status);
2343 }
2344
2345 static uint16_t float32_cvt_float16(uint32_t s, float_status *status)
2346 {
2347 return float32_to_float16(s, true, status);
2348 }
2349 static uint32_t float64_cvt_float32(uint64_t d, float_status *status)
2350 {
2351 return float64_to_float32(d, status);
2352 }
2353
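/*
 * vfcvtl widens the low half of the source elements and vfcvth the
 * high half; vfcvt_h_s and vfcvt_s_d narrow two registers into one,
 * with Vk supplying the low half of the result and Vj the high half.
 */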
2354 void HELPER(vfcvtl_s_h)(void *vd, void *vj,
2355 CPULoongArchState *env, uint32_t desc)
2356 {
2357 int i;
2358 VReg temp;
2359 VReg *Vd = (VReg *)vd;
2360 VReg *Vj = (VReg *)vj;
2361
2362 vec_clear_cause(env);
2363 for (i = 0; i < LSX_LEN/32; i++) {
2364 temp.UW(i) = float16_cvt_float32(Vj->UH(i), &env->fp_status);
2365 vec_update_fcsr0(env, GETPC());
2366 }
2367 *Vd = temp;
2368 }
2369
2370 void HELPER(vfcvtl_d_s)(void *vd, void *vj,
2371 CPULoongArchState *env, uint32_t desc)
2372 {
2373 int i;
2374 VReg temp;
2375 VReg *Vd = (VReg *)vd;
2376 VReg *Vj = (VReg *)vj;
2377
2378 vec_clear_cause(env);
2379 for (i = 0; i < LSX_LEN/64; i++) {
2380 temp.UD(i) = float32_cvt_float64(Vj->UW(i), &env->fp_status);
2381 vec_update_fcsr0(env, GETPC());
2382 }
2383 *Vd = temp;
2384 }
2385
2386 void HELPER(vfcvth_s_h)(void *vd, void *vj,
2387 CPULoongArchState *env, uint32_t desc)
2388 {
2389 int i;
2390 VReg temp;
2391 VReg *Vd = (VReg *)vd;
2392 VReg *Vj = (VReg *)vj;
2393
2394 vec_clear_cause(env);
2395 for (i = 0; i < LSX_LEN/32; i++) {
2396 temp.UW(i) = float16_cvt_float32(Vj->UH(i + 4), &env->fp_status);
2397 vec_update_fcsr0(env, GETPC());
2398 }
2399 *Vd = temp;
2400 }
2401
2402 void HELPER(vfcvth_d_s)(void *vd, void *vj,
2403 CPULoongArchState *env, uint32_t desc)
2404 {
2405 int i;
2406 VReg temp;
2407 VReg *Vd = (VReg *)vd;
2408 VReg *Vj = (VReg *)vj;
2409
2410 vec_clear_cause(env);
2411 for (i = 0; i < LSX_LEN/64; i++) {
2412 temp.UD(i) = float32_cvt_float64(Vj->UW(i + 2), &env->fp_status);
2413 vec_update_fcsr0(env, GETPC());
2414 }
2415 *Vd = temp;
2416 }
2417
2418 void HELPER(vfcvt_h_s)(void *vd, void *vj, void *vk,
2419 CPULoongArchState *env, uint32_t desc)
2420 {
2421 int i;
2422 VReg temp;
2423 VReg *Vd = (VReg *)vd;
2424 VReg *Vj = (VReg *)vj;
2425 VReg *Vk = (VReg *)vk;
2426
2427 vec_clear_cause(env);
2428 for (i = 0; i < LSX_LEN/32; i++) {
2429 temp.UH(i + 4) = float32_cvt_float16(Vj->UW(i), &env->fp_status);
2430 temp.UH(i) = float32_cvt_float16(Vk->UW(i), &env->fp_status);
2431 vec_update_fcsr0(env, GETPC());
2432 }
2433 *Vd = temp;
2434 }
2435
2436 void HELPER(vfcvt_s_d)(void *vd, void *vj, void *vk,
2437 CPULoongArchState *env, uint32_t desc)
2438 {
2439 int i;
2440 VReg temp;
2441 VReg *Vd = (VReg *)vd;
2442 VReg *Vj = (VReg *)vj;
2443 VReg *Vk = (VReg *)vk;
2444
2445 vec_clear_cause(env);
2446 for (i = 0; i < LSX_LEN/64; i++) {
2447 temp.UW(i + 2) = float64_cvt_float32(Vj->UD(i), &env->fp_status);
2448 temp.UW(i) = float64_cvt_float32(Vk->UD(i), &env->fp_status);
2449 vec_update_fcsr0(env, GETPC());
2450 }
2451 *Vd = temp;
2452 }
2453
2454 void HELPER(vfrint_s)(void *vd, void *vj,
2455 CPULoongArchState *env, uint32_t desc)
2456 {
2457 int i;
2458 VReg *Vd = (VReg *)vd;
2459 VReg *Vj = (VReg *)vj;
2460
2461 vec_clear_cause(env);
2462 for (i = 0; i < 4; i++) {
2463 Vd->W(i) = float32_round_to_int(Vj->UW(i), &env->fp_status);
2464 vec_update_fcsr0(env, GETPC());
2465 }
2466 }
2467
2468 void HELPER(vfrint_d)(void *vd, void *vj,
2469 CPULoongArchState *env, uint32_t desc)
2470 {
2471 int i;
2472 VReg *Vd = (VReg *)vd;
2473 VReg *Vj = (VReg *)vj;
2474
2475 vec_clear_cause(env);
2476 for (i = 0; i < 2; i++) {
2477 Vd->D(i) = float64_round_to_int(Vj->UD(i), &env->fp_status);
2478 vec_update_fcsr0(env, GETPC());
2479 }
2480 }
2481
2482 #define FCVT_2OP(NAME, BIT, E, MODE) \
2483 void HELPER(NAME)(void *vd, void *vj, \
2484 CPULoongArchState *env, uint32_t desc) \
2485 { \
2486 int i; \
2487 VReg *Vd = (VReg *)vd; \
2488 VReg *Vj = (VReg *)vj; \
2489 \
2490 vec_clear_cause(env); \
2491 for (i = 0; i < LSX_LEN/BIT; i++) { \
2492 FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); \
2493 set_float_rounding_mode(MODE, &env->fp_status); \
2494 Vd->E(i) = float## BIT ## _round_to_int(Vj->E(i), &env->fp_status); \
2495 set_float_rounding_mode(old_mode, &env->fp_status); \
2496 vec_update_fcsr0(env, GETPC()); \
2497 } \
2498 }
2499
2500 FCVT_2OP(vfrintrne_s, 32, UW, float_round_nearest_even)
2501 FCVT_2OP(vfrintrne_d, 64, UD, float_round_nearest_even)
2502 FCVT_2OP(vfrintrz_s, 32, UW, float_round_to_zero)
2503 FCVT_2OP(vfrintrz_d, 64, UD, float_round_to_zero)
2504 FCVT_2OP(vfrintrp_s, 32, UW, float_round_up)
2505 FCVT_2OP(vfrintrp_d, 64, UD, float_round_up)
2506 FCVT_2OP(vfrintrm_s, 32, UW, float_round_down)
2507 FCVT_2OP(vfrintrm_d, 64, UD, float_round_down)
2508
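/*
 * FTINT instantiates a float-to-integer conversion under an explicit
 * rounding mode, saving and restoring the mode in fp_status around
 * the underlying do_<fmt1>_to_<fmt2> conversion.
 */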
2509 #define FTINT(NAME, FMT1, FMT2, T1, T2, MODE) \
2510 static T2 do_ftint ## NAME(CPULoongArchState *env, T1 fj) \
2511 { \
2512 T2 fd; \
2513 FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); \
2514 \
2515 set_float_rounding_mode(MODE, &env->fp_status); \
2516 fd = do_## FMT1 ##_to_## FMT2(env, fj); \
2517 set_float_rounding_mode(old_mode, &env->fp_status); \
2518 return fd; \
2519 }
2520
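/*
 * The conversion proper: softfloat returns the extreme integer for a
 * NaN input, so when the invalid flag was raised by a NaN the result
 * is forced to 0, which is the result the architecture defines for
 * NaN inputs.
 */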
2521 #define DO_FTINT(FMT1, FMT2, T1, T2) \
2522 static T2 do_## FMT1 ##_to_## FMT2(CPULoongArchState *env, T1 fj) \
2523 { \
2524 T2 fd; \
2525 \
2526 fd = FMT1 ##_to_## FMT2(fj, &env->fp_status); \
2527 if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { \
2528 if (FMT1 ##_is_any_nan(fj)) { \
2529 fd = 0; \
2530 } \
2531 } \
2532 vec_update_fcsr0(env, GETPC()); \
2533 return fd; \
2534 }
2535
2536 DO_FTINT(float32, int32, uint32_t, uint32_t)
2537 DO_FTINT(float64, int64, uint64_t, uint64_t)
2538 DO_FTINT(float32, uint32, uint32_t, uint32_t)
2539 DO_FTINT(float64, uint64, uint64_t, uint64_t)
2540 DO_FTINT(float64, int32, uint64_t, uint32_t)
2541 DO_FTINT(float32, int64, uint32_t, uint64_t)
2542
2543 FTINT(rne_w_s, float32, int32, uint32_t, uint32_t, float_round_nearest_even)
2544 FTINT(rne_l_d, float64, int64, uint64_t, uint64_t, float_round_nearest_even)
2545 FTINT(rp_w_s, float32, int32, uint32_t, uint32_t, float_round_up)
2546 FTINT(rp_l_d, float64, int64, uint64_t, uint64_t, float_round_up)
2547 FTINT(rz_w_s, float32, int32, uint32_t, uint32_t, float_round_to_zero)
2548 FTINT(rz_l_d, float64, int64, uint64_t, uint64_t, float_round_to_zero)
2549 FTINT(rm_w_s, float32, int32, uint32_t, uint32_t, float_round_down)
2550 FTINT(rm_l_d, float64, int64, uint64_t, uint64_t, float_round_down)
2551
2552 DO_2OP_F(vftintrne_w_s, 32, UW, do_ftintrne_w_s)
2553 DO_2OP_F(vftintrne_l_d, 64, UD, do_ftintrne_l_d)
2554 DO_2OP_F(vftintrp_w_s, 32, UW, do_ftintrp_w_s)
2555 DO_2OP_F(vftintrp_l_d, 64, UD, do_ftintrp_l_d)
2556 DO_2OP_F(vftintrz_w_s, 32, UW, do_ftintrz_w_s)
2557 DO_2OP_F(vftintrz_l_d, 64, UD, do_ftintrz_l_d)
2558 DO_2OP_F(vftintrm_w_s, 32, UW, do_ftintrm_w_s)
2559 DO_2OP_F(vftintrm_l_d, 64, UD, do_ftintrm_l_d)
2560 DO_2OP_F(vftint_w_s, 32, UW, do_float32_to_int32)
2561 DO_2OP_F(vftint_l_d, 64, UD, do_float64_to_int64)
2562
2563 FTINT(rz_wu_s, float32, uint32, uint32_t, uint32_t, float_round_to_zero)
2564 FTINT(rz_lu_d, float64, uint64, uint64_t, uint64_t, float_round_to_zero)
2565
2566 DO_2OP_F(vftintrz_wu_s, 32, UW, do_ftintrz_wu_s)
2567 DO_2OP_F(vftintrz_lu_d, 64, UD, do_ftintrz_lu_d)
2568 DO_2OP_F(vftint_wu_s, 32, UW, do_float32_to_uint32)
2569 DO_2OP_F(vftint_lu_d, 64, UD, do_float64_to_uint64)
2570
2571 FTINT(rm_w_d, float64, int32, uint64_t, uint32_t, float_round_down)
2572 FTINT(rp_w_d, float64, int32, uint64_t, uint32_t, float_round_up)
2573 FTINT(rz_w_d, float64, int32, uint64_t, uint32_t, float_round_to_zero)
2574 FTINT(rne_w_d, float64, int32, uint64_t, uint32_t, float_round_nearest_even)
2575
2576 #define FTINT_W_D(NAME, FN) \
2577 void HELPER(NAME)(void *vd, void *vj, void *vk, \
2578 CPULoongArchState *env, uint32_t desc) \
2579 { \
2580 int i; \
2581 VReg temp; \
2582 VReg *Vd = (VReg *)vd; \
2583 VReg *Vj = (VReg *)vj; \
2584 VReg *Vk = (VReg *)vk; \
2585 \
2586 vec_clear_cause(env); \
2587 for (i = 0; i < 2; i++) { \
2588 temp.W(i + 2) = FN(env, Vj->UD(i)); \
2589 temp.W(i) = FN(env, Vk->UD(i)); \
2590 } \
2591 *Vd = temp; \
2592 }
2593
2594 FTINT_W_D(vftint_w_d, do_float64_to_int32)
2595 FTINT_W_D(vftintrm_w_d, do_ftintrm_w_d)
2596 FTINT_W_D(vftintrp_w_d, do_ftintrp_w_d)
2597 FTINT_W_D(vftintrz_w_d, do_ftintrz_w_d)
2598 FTINT_W_D(vftintrne_w_d, do_ftintrne_w_d)
2599
2600 FTINT(rml_l_s, float32, int64, uint32_t, uint64_t, float_round_down)
2601 FTINT(rpl_l_s, float32, int64, uint32_t, uint64_t, float_round_up)
2602 FTINT(rzl_l_s, float32, int64, uint32_t, uint64_t, float_round_to_zero)
2603 FTINT(rnel_l_s, float32, int64, uint32_t, uint64_t, float_round_nearest_even)
2604 FTINT(rmh_l_s, float32, int64, uint32_t, uint64_t, float_round_down)
2605 FTINT(rph_l_s, float32, int64, uint32_t, uint64_t, float_round_up)
2606 FTINT(rzh_l_s, float32, int64, uint32_t, uint64_t, float_round_to_zero)
2607 FTINT(rneh_l_s, float32, int64, uint32_t, uint64_t, float_round_nearest_even)
2608
2609 #define FTINTL_L_S(NAME, FN) \
2610 void HELPER(NAME)(void *vd, void *vj, \
2611 CPULoongArchState *env, uint32_t desc) \
2612 { \
2613 int i; \
2614 VReg temp; \
2615 VReg *Vd = (VReg *)vd; \
2616 VReg *Vj = (VReg *)vj; \
2617 \
2618 vec_clear_cause(env); \
2619 for (i = 0; i < 2; i++) { \
2620 temp.D(i) = FN(env, Vj->UW(i)); \
2621 } \
2622 *Vd = temp; \
2623 }
2624
2625 FTINTL_L_S(vftintl_l_s, do_float32_to_int64)
2626 FTINTL_L_S(vftintrml_l_s, do_ftintrml_l_s)
2627 FTINTL_L_S(vftintrpl_l_s, do_ftintrpl_l_s)
2628 FTINTL_L_S(vftintrzl_l_s, do_ftintrzl_l_s)
2629 FTINTL_L_S(vftintrnel_l_s, do_ftintrnel_l_s)
2630
2631 #define FTINTH_L_S(NAME, FN) \
2632 void HELPER(NAME)(void *vd, void *vj, \
2633 CPULoongArchState *env, uint32_t desc) \
2634 { \
2635 int i; \
2636 VReg temp; \
2637 VReg *Vd = (VReg *)vd; \
2638 VReg *Vj = (VReg *)vj; \
2639 \
2640 vec_clear_cause(env); \
2641 for (i = 0; i < 2; i++) { \
2642 temp.D(i) = FN(env, Vj->UW(i + 2)); \
2643 } \
2644 *Vd = temp; \
2645 }
2646
2647 FTINTH_L_S(vftinth_l_s, do_float32_to_int64)
2648 FTINTH_L_S(vftintrmh_l_s, do_ftintrmh_l_s)
2649 FTINTH_L_S(vftintrph_l_s, do_ftintrph_l_s)
2650 FTINTH_L_S(vftintrzh_l_s, do_ftintrzh_l_s)
2651 FTINTH_L_S(vftintrneh_l_s, do_ftintrneh_l_s)
2652
2653 #define FFINT(NAME, FMT1, FMT2, T1, T2) \
2654 static T2 do_ffint_ ## NAME(CPULoongArchState *env, T1 fj) \
2655 { \
2656 T2 fd; \
2657 \
2658 fd = FMT1 ##_to_## FMT2(fj, &env->fp_status); \
2659 vec_update_fcsr0(env, GETPC()); \
2660 return fd; \
2661 }
2662
2663 FFINT(s_w, int32, float32, int32_t, uint32_t)
2664 FFINT(d_l, int64, float64, int64_t, uint64_t)
2665 FFINT(s_wu, uint32, float32, uint32_t, uint32_t)
2666 FFINT(d_lu, uint64, float64, uint64_t, uint64_t)
2667
2668 DO_2OP_F(vffint_s_w, 32, W, do_ffint_s_w)
2669 DO_2OP_F(vffint_d_l, 64, D, do_ffint_d_l)
2670 DO_2OP_F(vffint_s_wu, 32, UW, do_ffint_s_wu)
2671 DO_2OP_F(vffint_d_lu, 64, UD, do_ffint_d_lu)
2672
2673 void HELPER(vffintl_d_w)(void *vd, void *vj,
2674 CPULoongArchState *env, uint32_t desc)
2675 {
2676 int i;
2677 VReg temp;
2678 VReg *Vd = (VReg *)vd;
2679 VReg *Vj = (VReg *)vj;
2680
2681 vec_clear_cause(env);
2682 for (i = 0; i < 2; i++) {
2683 temp.D(i) = int32_to_float64(Vj->W(i), &env->fp_status);
2684 vec_update_fcsr0(env, GETPC());
2685 }
2686 *Vd = temp;
2687 }
2688
2689 void HELPER(vffinth_d_w)(void *vd, void *vj,
2690 CPULoongArchState *env, uint32_t desc)
2691 {
2692 int i;
2693 VReg temp;
2694 VReg *Vd = (VReg *)vd;
2695 VReg *Vj = (VReg *)vj;
2696
2697 vec_clear_cause(env);
2698 for (i = 0; i < 2; i++) {
2699 temp.D(i) = int32_to_float64(Vj->W(i + 2), &env->fp_status);
2700 vec_update_fcsr0(env, GETPC());
2701 }
2702 *Vd = temp;
2703 }
2704
2705 void HELPER(vffint_s_l)(void *vd, void *vj, void *vk,
2706 CPULoongArchState *env, uint32_t desc)
2707 {
2708 int i;
2709 VReg temp;
2710 VReg *Vd = (VReg *)vd;
2711 VReg *Vj = (VReg *)vj;
2712 VReg *Vk = (VReg *)vk;
2713
2714 vec_clear_cause(env);
2715 for (i = 0; i < 2; i++) {
2716 temp.W(i + 2) = int64_to_float32(Vj->D(i), &env->fp_status);
2717 temp.W(i) = int64_to_float32(Vk->D(i), &env->fp_status);
2718 vec_update_fcsr0(env, GETPC());
2719 }
2720 *Vd = temp;
2721 }
2722
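/* Integer compares produce an all-ones (-1) lane on true and 0 on false. */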
2723 #define VSEQ(a, b) (a == b ? -1 : 0)
2724 #define VSLE(a, b) (a <= b ? -1 : 0)
2725 #define VSLT(a, b) (a < b ? -1 : 0)
2726
2727 #define VCMPI(NAME, BIT, E, DO_OP) \
2728 void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
2729 { \
2730 int i; \
2731 VReg *Vd = (VReg *)vd; \
2732 VReg *Vj = (VReg *)vj; \
2733 typedef __typeof(Vd->E(0)) TD; \
2734 \
2735 for (i = 0; i < LSX_LEN/BIT; i++) { \
2736 Vd->E(i) = DO_OP(Vj->E(i), (TD)imm); \
2737 } \
2738 }
2739
2740 VCMPI(vseqi_b, 8, B, VSEQ)
2741 VCMPI(vseqi_h, 16, H, VSEQ)
2742 VCMPI(vseqi_w, 32, W, VSEQ)
2743 VCMPI(vseqi_d, 64, D, VSEQ)
2744 VCMPI(vslei_b, 8, B, VSLE)
2745 VCMPI(vslei_h, 16, H, VSLE)
2746 VCMPI(vslei_w, 32, W, VSLE)
2747 VCMPI(vslei_d, 64, D, VSLE)
2748 VCMPI(vslei_bu, 8, UB, VSLE)
2749 VCMPI(vslei_hu, 16, UH, VSLE)
2750 VCMPI(vslei_wu, 32, UW, VSLE)
2751 VCMPI(vslei_du, 64, UD, VSLE)
2752 VCMPI(vslti_b, 8, B, VSLT)
2753 VCMPI(vslti_h, 16, H, VSLT)
2754 VCMPI(vslti_w, 32, W, VSLT)
2755 VCMPI(vslti_d, 64, D, VSLT)
2756 VCMPI(vslti_bu, 8, UB, VSLT)
2757 VCMPI(vslti_hu, 16, UH, VSLT)
2758 VCMPI(vslti_wu, 32, UW, VSLT)
2759 VCMPI(vslti_du, 64, UD, VSLT)
2760
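/*
 * Map a softfloat comparison result onto the LT/EQ/GT/UN condition
 * mask encoded in the instruction; any selected condition yields an
 * all-ones lane.
 */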
2761 static uint64_t vfcmp_common(CPULoongArchState *env,
2762 FloatRelation cmp, uint32_t flags)
2763 {
2764 uint64_t ret = 0;
2765
2766 switch (cmp) {
2767 case float_relation_less:
2768 ret = (flags & FCMP_LT);
2769 break;
2770 case float_relation_equal:
2771 ret = (flags & FCMP_EQ);
2772 break;
2773 case float_relation_greater:
2774 ret = (flags & FCMP_GT);
2775 break;
2776 case float_relation_unordered:
2777 ret = (flags & FCMP_UN);
2778 break;
2779 default:
2780 g_assert_not_reached();
2781 }
2782
2783 if (ret) {
2784 ret = -1;
2785 }
2786
2787 return ret;
2788 }
2789
2790 #define VFCMP(NAME, BIT, E, FN) \
2791 void HELPER(NAME)(CPULoongArchState *env, \
2792 uint32_t vd, uint32_t vj, uint32_t vk, uint32_t flags) \
2793 { \
2794 int i; \
2795 VReg t; \
2796 VReg *Vd = &(env->fpr[vd].vreg); \
2797 VReg *Vj = &(env->fpr[vj].vreg); \
2798 VReg *Vk = &(env->fpr[vk].vreg); \
2799 \
2800 vec_clear_cause(env); \
2801 for (i = 0; i < LSX_LEN/BIT; i++) { \
2802 FloatRelation cmp; \
2803 cmp = FN(Vj->E(i), Vk->E(i), &env->fp_status); \
2804 t.E(i) = vfcmp_common(env, cmp, flags); \
2805 vec_update_fcsr0(env, GETPC()); \
2806 } \
2807 *Vd = t; \
2808 }
2809
2810 VFCMP(vfcmp_c_s, 32, UW, float32_compare_quiet)
2811 VFCMP(vfcmp_s_s, 32, UW, float32_compare)
2812 VFCMP(vfcmp_c_d, 64, UD, float64_compare_quiet)
2813 VFCMP(vfcmp_s_d, 64, UD, float64_compare)
2814
2815 void HELPER(vbitseli_b)(void *vd, void *vj, uint64_t imm, uint32_t desc)
2816 {
2817 int i;
2818 VReg *Vd = (VReg *)vd;
2819 VReg *Vj = (VReg *)vj;
2820
2821 for (i = 0; i < 16; i++) {
2822 Vd->B(i) = (~Vd->B(i) & Vj->B(i)) | (Vd->B(i) & imm);
2823 }
2824 }
2825
2826 /* Copied from target/arm/tcg/sve_helper.c. */
2827 static inline bool do_match2(uint64_t n, uint64_t m0, uint64_t m1, int esz)
2828 {
2829 uint64_t bits = 8 << esz;
2830 uint64_t ones = dup_const(esz, 1);
2831 uint64_t signs = ones << (bits - 1);
2832 uint64_t cmp0, cmp1;
2833
2834 cmp1 = dup_const(esz, n);
2835 cmp0 = cmp1 ^ m0;
2836 cmp1 = cmp1 ^ m1;
2837 cmp0 = (cmp0 - ones) & ~cmp0;
2838 cmp1 = (cmp1 - ones) & ~cmp1;
2839 return (cmp0 | cmp1) & signs;
2840 }
2841
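/*
 * do_match2(0, ...) is true iff some lane of the register is zero, so
 * vsetanyeqz sets the condition flag when a zero lane exists and
 * vsetallnez sets it when none does.
 */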
2842 #define SETANYEQZ(NAME, MO) \
2843 void HELPER(NAME)(CPULoongArchState *env, uint32_t cd, uint32_t vj) \
2844 { \
2845 VReg *Vj = &(env->fpr[vj].vreg); \
2846 \
2847 env->cf[cd & 0x7] = do_match2(0, Vj->D(0), Vj->D(1), MO); \
2848 }
2849 SETANYEQZ(vsetanyeqz_b, MO_8)
2850 SETANYEQZ(vsetanyeqz_h, MO_16)
2851 SETANYEQZ(vsetanyeqz_w, MO_32)
2852 SETANYEQZ(vsetanyeqz_d, MO_64)
2853
2854 #define SETALLNEZ(NAME, MO) \
2855 void HELPER(NAME)(CPULoongArchState *env, uint32_t cd, uint32_t vj) \
2856 { \
2857 VReg *Vj = &(env->fpr[vj].vreg); \
2858 \
2859 env->cf[cd & 0x7] = !do_match2(0, Vj->D(0), Vj->D(1), MO); \
2860 }
2861 SETALLNEZ(vsetallnez_b, MO_8)
2862 SETALLNEZ(vsetallnez_h, MO_16)
2863 SETALLNEZ(vsetallnez_w, MO_32)
2864 SETALLNEZ(vsetallnez_d, MO_64)
2865
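/*
 * Pack/pick/interleave family: packev/packod alternate the even (or
 * odd) indexed elements of Vk and Vj; pickev/pickod place the even
 * (or odd) elements of Vk in the low half of the result and those of
 * Vj in the high half; ilvl/ilvh interleave the low (or high) halves
 * of the two sources.
 */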
2866 #define VPACKEV(NAME, BIT, E) \
2867 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
2868 { \
2869 int i; \
2870 VReg temp; \
2871 VReg *Vd = (VReg *)vd; \
2872 VReg *Vj = (VReg *)vj; \
2873 VReg *Vk = (VReg *)vk; \
2874 \
2875 for (i = 0; i < LSX_LEN/BIT; i++) { \
2876 temp.E(2 * i + 1) = Vj->E(2 * i); \
2877 temp.E(2 * i) = Vk->E(2 * i); \
2878 } \
2879 *Vd = temp; \
2880 }
2881
2882 VPACKEV(vpackev_b, 16, B)
2883 VPACKEV(vpackev_h, 32, H)
2884 VPACKEV(vpackev_w, 64, W)
2885 VPACKEV(vpackev_d, 128, D)
2886
2887 #define VPACKOD(NAME, BIT, E) \
2888 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
2889 { \
2890 int i; \
2891 VReg temp; \
2892 VReg *Vd = (VReg *)vd; \
2893 VReg *Vj = (VReg *)vj; \
2894 VReg *Vk = (VReg *)vk; \
2895 \
2896 for (i = 0; i < LSX_LEN/BIT; i++) { \
2897 temp.E(2 * i + 1) = Vj->E(2 * i + 1); \
2898 temp.E(2 * i) = Vk->E(2 * i + 1); \
2899 } \
2900 *Vd = temp; \
2901 }
2902
2903 VPACKOD(vpackod_b, 16, B)
2904 VPACKOD(vpackod_h, 32, H)
2905 VPACKOD(vpackod_w, 64, W)
2906 VPACKOD(vpackod_d, 128, D)
2907
2908 #define VPICKEV(NAME, BIT, E) \
2909 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
2910 { \
2911 int i; \
2912 VReg temp; \
2913 VReg *Vd = (VReg *)vd; \
2914 VReg *Vj = (VReg *)vj; \
2915 VReg *Vk = (VReg *)vk; \
2916 \
2917 for (i = 0; i < LSX_LEN/BIT; i++) { \
2918 temp.E(i + LSX_LEN/BIT) = Vj->E(2 * i); \
2919 temp.E(i) = Vk->E(2 * i); \
2920 } \
2921 *Vd = temp; \
2922 }
2923
2924 VPICKEV(vpickev_b, 16, B)
2925 VPICKEV(vpickev_h, 32, H)
2926 VPICKEV(vpickev_w, 64, W)
2927 VPICKEV(vpickev_d, 128, D)
2928
2929 #define VPICKOD(NAME, BIT, E) \
2930 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
2931 { \
2932 int i; \
2933 VReg temp; \
2934 VReg *Vd = (VReg *)vd; \
2935 VReg *Vj = (VReg *)vj; \
2936 VReg *Vk = (VReg *)vk; \
2937 \
2938 for (i = 0; i < LSX_LEN/BIT; i++) { \
2939 temp.E(i + LSX_LEN/BIT) = Vj->E(2 * i + 1); \
2940 temp.E(i) = Vk->E(2 * i + 1); \
2941 } \
2942 *Vd = temp; \
2943 }
2944
2945 VPICKOD(vpickod_b, 16, B)
2946 VPICKOD(vpickod_h, 32, H)
2947 VPICKOD(vpickod_w, 64, W)
2948 VPICKOD(vpickod_d, 128, D)
2949
2950 #define VILVL(NAME, BIT, E) \
2951 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
2952 { \
2953 int i; \
2954 VReg temp; \
2955 VReg *Vd = (VReg *)vd; \
2956 VReg *Vj = (VReg *)vj; \
2957 VReg *Vk = (VReg *)vk; \
2958 \
2959 for (i = 0; i < LSX_LEN/BIT; i++) { \
2960 temp.E(2 * i + 1) = Vj->E(i); \
2961 temp.E(2 * i) = Vk->E(i); \
2962 } \
2963 *Vd = temp; \
2964 }
2965
2966 VILVL(vilvl_b, 16, B)
2967 VILVL(vilvl_h, 32, H)
2968 VILVL(vilvl_w, 64, W)
2969 VILVL(vilvl_d, 128, D)
2970
2971 #define VILVH(NAME, BIT, E) \
2972 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
2973 { \
2974 int i; \
2975 VReg temp; \
2976 VReg *Vd = (VReg *)vd; \
2977 VReg *Vj = (VReg *)vj; \
2978 VReg *Vk = (VReg *)vk; \
2979 \
2980 for (i = 0; i < LSX_LEN/BIT; i++) { \
2981 temp.E(2 * i + 1) = Vj->E(i + LSX_LEN/BIT); \
2982 temp.E(2 * i) = Vk->E(i + LSX_LEN/BIT); \
2983 } \
2984 *Vd = temp; \
2985 }
2986
2987 VILVH(vilvh_b, 16, B)
2988 VILVH(vilvh_h, 32, H)
2989 VILVH(vilvh_w, 64, W)
2990 VILVH(vilvh_d, 128, D)
2991
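/*
 * vshuf.b: each byte of Va selects, modulo 32, a byte from the
 * 32-byte pair {Vj:Vk}, indices 0..15 coming from Vk and 16..31 from
 * Vj.
 */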
2992 void HELPER(vshuf_b)(void *vd, void *vj, void *vk, void *va, uint32_t desc)
2993 {
2994 int i, m;
2995 VReg temp;
2996 VReg *Vd = (VReg *)vd;
2997 VReg *Vj = (VReg *)vj;
2998 VReg *Vk = (VReg *)vk;
2999 VReg *Va = (VReg *)va;
3000
3001 m = LSX_LEN/8;
3002 for (i = 0; i < m; i++) {
3003 uint64_t k = (uint8_t)Va->B(i) % (2 * m);
3004 temp.B(i) = k < m ? Vk->B(k) : Vj->B(k - m);
3005 }
3006 *Vd = temp;
3007 }
3008
3009 #define VSHUF(NAME, BIT, E) \
3010 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
3011 { \
3012 int i, m; \
3013 VReg temp; \
3014 VReg *Vd = (VReg *)vd; \
3015 VReg *Vj = (VReg *)vj; \
3016 VReg *Vk = (VReg *)vk; \
3017 \
3018 m = LSX_LEN/BIT; \
3019 for (i = 0; i < m; i++) { \
3020 uint64_t k = ((uint8_t)Vd->E(i)) % (2 * m); \
3021 temp.E(i) = k < m ? Vk->E(k) : Vj->E(k - m); \
3022 } \
3023 *Vd = temp; \
3024 }
3025
3026 VSHUF(vshuf_h, 16, H)
3027 VSHUF(vshuf_w, 32, W)
3028 VSHUF(vshuf_d, 64, D)
3029
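/*
 * vshuf4i: imm holds four 2-bit selectors; (i & 0xfc) keeps the base
 * of the current group of four elements and the selector picks one
 * element within that group.
 */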
3030 #define VSHUF4I(NAME, BIT, E) \
3031 void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
3032 { \
3033 int i; \
3034 VReg temp; \
3035 VReg *Vd = (VReg *)vd; \
3036 VReg *Vj = (VReg *)vj; \
3037 \
3038 for (i = 0; i < LSX_LEN/BIT; i++) { \
3039 temp.E(i) = Vj->E(((i) & 0xfc) + (((imm) >> \
3040 (2 * ((i) & 0x03))) & 0x03)); \
3041 } \
3042 *Vd = temp; \
3043 }
3044
3045 VSHUF4I(vshuf4i_b, 8, B)
3046 VSHUF4I(vshuf4i_h, 16, H)
3047 VSHUF4I(vshuf4i_w, 32, W)
3048
3049 void HELPER(vshuf4i_d)(void *vd, void *vj, uint64_t imm, uint32_t desc)
3050 {
3051 VReg *Vd = (VReg *)vd;
3052 VReg *Vj = (VReg *)vj;
3053
3054 VReg temp;
3055 temp.D(0) = (imm & 2 ? Vj : Vd)->D(imm & 1);
3056 temp.D(1) = (imm & 8 ? Vj : Vd)->D((imm >> 2) & 1);
3057 *Vd = temp;
3058 }
3059
3060 void HELPER(vpermi_w)(void *vd, void *vj, uint64_t imm, uint32_t desc)
3061 {
3062 VReg temp;
3063 VReg *Vd = (VReg *)vd;
3064 VReg *Vj = (VReg *)vj;
3065
3066 temp.W(0) = Vj->W(imm & 0x3);
3067 temp.W(1) = Vj->W((imm >> 2) & 0x3);
3068 temp.W(2) = Vd->W((imm >> 4) & 0x3);
3069 temp.W(3) = Vd->W((imm >> 6) & 0x3);
3070 *Vd = temp;
3071 }
3072
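/*
 * vextrins: the high nibble of imm (masked to the element count)
 * selects the destination element and the low nibble the source
 * element.
 */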
3073 #define VEXTRINS(NAME, BIT, E, MASK) \
3074 void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
3075 { \
3076 int ins, extr; \
3077 VReg *Vd = (VReg *)vd; \
3078 VReg *Vj = (VReg *)vj; \
3079 \
3080 ins = (imm >> 4) & MASK; \
3081 extr = imm & MASK; \
3082 Vd->E(ins) = Vj->E(extr); \
3083 }
3084
3085 VEXTRINS(vextrins_b, 8, B, 0xf)
3086 VEXTRINS(vextrins_h, 16, H, 0x7)
3087 VEXTRINS(vextrins_w, 32, W, 0x3)
3088 VEXTRINS(vextrins_d, 64, D, 0x1)