/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch vector helper functions.
 *
 * Copyright (c) 2022-2023 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
#include "internals.h"
#include "tcg/tcg.h"
#include "vec.h"
#include "tcg/tcg-gvec-desc.h"

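/*
 * All helpers below follow the TCG gvec calling convention: vd, vj and
 * vk point at VReg operands and desc is the gvec descriptor.
 * simd_oprsz(desc) yields the vector length in bytes (16 for a 128-bit
 * LSX register, 32 for a 256-bit LASX register), so loops bounded by
 * oprsz / 16 run once per 128-bit lane.
 */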
#define DO_ADD(a, b) (a + b)
#define DO_SUB(a, b) (a - b)

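/*
 * DO_ODD_EVEN expands to the "halving widen" helpers (vhaddw/vhsubw):
 * each BIT-wide destination element combines the odd-indexed element
 * of Vj with the even-indexed element of Vk, both widened from BIT/2
 * bits by the cast to TD.
 */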
#define DO_ODD_EVEN(NAME, BIT, E1, E2, DO_OP)                        \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)       \
{                                                                    \
    int i;                                                           \
    VReg *Vd = (VReg *)vd;                                           \
    VReg *Vj = (VReg *)vj;                                           \
    VReg *Vk = (VReg *)vk;                                           \
    typedef __typeof(Vd->E1(0)) TD;                                  \
    int oprsz = simd_oprsz(desc);                                    \
                                                                     \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                        \
        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i)); \
    }                                                                \
}

DO_ODD_EVEN(vhaddw_h_b, 16, H, B, DO_ADD)
DO_ODD_EVEN(vhaddw_w_h, 32, W, H, DO_ADD)
DO_ODD_EVEN(vhaddw_d_w, 64, D, W, DO_ADD)

void HELPER(vhaddw_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_makes64(Vj->D(2 * i + 1)),
                              int128_makes64(Vk->D(2 * i)));
    }
}

DO_ODD_EVEN(vhsubw_h_b, 16, H, B, DO_SUB)
DO_ODD_EVEN(vhsubw_w_h, 32, W, H, DO_SUB)
DO_ODD_EVEN(vhsubw_d_w, 64, D, W, DO_SUB)

void HELPER(vhsubw_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_makes64(Vj->D(2 * i + 1)),
                              int128_makes64(Vk->D(2 * i)));
    }
}

DO_ODD_EVEN(vhaddw_hu_bu, 16, UH, UB, DO_ADD)
DO_ODD_EVEN(vhaddw_wu_hu, 32, UW, UH, DO_ADD)
DO_ODD_EVEN(vhaddw_du_wu, 64, UD, UW, DO_ADD)

void HELPER(vhaddw_qu_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i + 1)),
                              int128_make64(Vk->UD(2 * i)));
    }
}

DO_ODD_EVEN(vhsubw_hu_bu, 16, UH, UB, DO_SUB)
DO_ODD_EVEN(vhsubw_wu_hu, 32, UW, UH, DO_SUB)
DO_ODD_EVEN(vhsubw_du_wu, 64, UD, UW, DO_SUB)

void HELPER(vhsubw_qu_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_make64(Vj->UD(2 * i + 1)),
                              int128_make64(Vk->UD(2 * i)));
    }
}

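/*
 * DO_EVEN and DO_ODD widen the even-indexed (2 * i) or odd-indexed
 * (2 * i + 1) elements of both sources before applying DO_OP; they
 * back the vaddwev/vaddwod, vsubwev/vsubwod and vmulwev/vmulwod
 * helpers.  The q_d forms are written out by hand because the 64-bit
 * sources must widen into Int128 arithmetic.
 */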
#define DO_EVEN(NAME, BIT, E1, E2, DO_OP)                        \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)   \
{                                                                \
    int i;                                                       \
    VReg *Vd = (VReg *)vd;                                       \
    VReg *Vj = (VReg *)vj;                                       \
    VReg *Vk = (VReg *)vk;                                       \
    typedef __typeof(Vd->E1(0)) TD;                              \
    int oprsz = simd_oprsz(desc);                                \
                                                                 \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                    \
        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i)); \
    }                                                            \
}

#define DO_ODD(NAME, BIT, E1, E2, DO_OP)                                 \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)           \
{                                                                        \
    int i;                                                               \
    VReg *Vd = (VReg *)vd;                                               \
    VReg *Vj = (VReg *)vj;                                               \
    VReg *Vk = (VReg *)vk;                                               \
    typedef __typeof(Vd->E1(0)) TD;                                      \
    int oprsz = simd_oprsz(desc);                                        \
                                                                         \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                            \
        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i + 1)); \
    }                                                                    \
}

void HELPER(vaddwev_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_makes64(Vj->D(2 * i)),
                              int128_makes64(Vk->D(2 * i)));
    }
}

DO_EVEN(vaddwev_h_b, 16, H, B, DO_ADD)
DO_EVEN(vaddwev_w_h, 32, W, H, DO_ADD)
DO_EVEN(vaddwev_d_w, 64, D, W, DO_ADD)

void HELPER(vaddwod_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_makes64(Vj->D(2 * i + 1)),
                              int128_makes64(Vk->D(2 * i + 1)));
    }
}

DO_ODD(vaddwod_h_b, 16, H, B, DO_ADD)
DO_ODD(vaddwod_w_h, 32, W, H, DO_ADD)
DO_ODD(vaddwod_d_w, 64, D, W, DO_ADD)

void HELPER(vsubwev_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_makes64(Vj->D(2 * i)),
                              int128_makes64(Vk->D(2 * i)));
    }
}

DO_EVEN(vsubwev_h_b, 16, H, B, DO_SUB)
DO_EVEN(vsubwev_w_h, 32, W, H, DO_SUB)
DO_EVEN(vsubwev_d_w, 64, D, W, DO_SUB)

void HELPER(vsubwod_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_makes64(Vj->D(2 * i + 1)),
                              int128_makes64(Vk->D(2 * i + 1)));
    }
}

DO_ODD(vsubwod_h_b, 16, H, B, DO_SUB)
DO_ODD(vsubwod_w_h, 32, W, H, DO_SUB)
DO_ODD(vsubwod_d_w, 64, D, W, DO_SUB)

void HELPER(vaddwev_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i)),
                              int128_make64(Vk->UD(2 * i)));
    }
}

DO_EVEN(vaddwev_h_bu, 16, UH, UB, DO_ADD)
DO_EVEN(vaddwev_w_hu, 32, UW, UH, DO_ADD)
DO_EVEN(vaddwev_d_wu, 64, UD, UW, DO_ADD)

void HELPER(vaddwod_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i + 1)),
                              int128_make64(Vk->UD(2 * i + 1)));
    }
}

DO_ODD(vaddwod_h_bu, 16, UH, UB, DO_ADD)
DO_ODD(vaddwod_w_hu, 32, UW, UH, DO_ADD)
DO_ODD(vaddwod_d_wu, 64, UD, UW, DO_ADD)

void HELPER(vsubwev_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_make64(Vj->UD(2 * i)),
                              int128_make64(Vk->UD(2 * i)));
    }
}

DO_EVEN(vsubwev_h_bu, 16, UH, UB, DO_SUB)
DO_EVEN(vsubwev_w_hu, 32, UW, UH, DO_SUB)
DO_EVEN(vsubwev_d_wu, 64, UD, UW, DO_SUB)

void HELPER(vsubwod_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_make64(Vj->UD(2 * i + 1)),
                              int128_make64(Vk->UD(2 * i + 1)));
    }
}

DO_ODD(vsubwod_h_bu, 16, UH, UB, DO_SUB)
DO_ODD(vsubwod_w_hu, 32, UW, UH, DO_SUB)
DO_ODD(vsubwod_d_wu, 64, UD, UW, DO_SUB)

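/*
 * The _u_s variants mix signedness: the Vj element widens as unsigned
 * (TDU) while the Vk element widens as signed (TDS), matching the
 * bu_b, hu_h and wu_w instruction forms.
 */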
#define DO_EVEN_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)             \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)        \
{                                                                     \
    int i;                                                            \
    VReg *Vd = (VReg *)vd;                                            \
    VReg *Vj = (VReg *)vj;                                            \
    VReg *Vk = (VReg *)vk;                                            \
    typedef __typeof(Vd->ES1(0)) TDS;                                 \
    typedef __typeof(Vd->EU1(0)) TDU;                                 \
    int oprsz = simd_oprsz(desc);                                     \
                                                                      \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                         \
        Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i), (TDS)Vk->ES2(2 * i)); \
    }                                                                 \
}

#define DO_ODD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)                      \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)                \
{                                                                             \
    int i;                                                                    \
    VReg *Vd = (VReg *)vd;                                                    \
    VReg *Vj = (VReg *)vj;                                                    \
    VReg *Vk = (VReg *)vk;                                                    \
    typedef __typeof(Vd->ES1(0)) TDS;                                         \
    typedef __typeof(Vd->EU1(0)) TDU;                                         \
    int oprsz = simd_oprsz(desc);                                             \
                                                                              \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                                 \
        Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i + 1), (TDS)Vk->ES2(2 * i + 1)); \
    }                                                                         \
}

void HELPER(vaddwev_q_du_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i)),
                              int128_makes64(Vk->D(2 * i)));
    }
}

DO_EVEN_U_S(vaddwev_h_bu_b, 16, H, UH, B, UB, DO_ADD)
DO_EVEN_U_S(vaddwev_w_hu_h, 32, W, UW, H, UH, DO_ADD)
DO_EVEN_U_S(vaddwev_d_wu_w, 64, D, UD, W, UW, DO_ADD)

void HELPER(vaddwod_q_du_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i + 1)),
                              int128_makes64(Vk->D(2 * i + 1)));
    }
}

DO_ODD_U_S(vaddwod_h_bu_b, 16, H, UH, B, UB, DO_ADD)
DO_ODD_U_S(vaddwod_w_hu_h, 32, W, UW, H, UH, DO_ADD)
DO_ODD_U_S(vaddwod_d_wu_w, 64, D, UD, W, UW, DO_ADD)

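/*
 * Overflow-free averaging: (a >> 1) + (b >> 1) drops the carry from
 * the two low bits, and (a & b & 1) restores it, giving
 * floor((a + b) / 2) without needing a wider type.  DO_VAVGR adds
 * ((a | b) & 1) instead, which rounds the average up:
 * DO_VAVG(1, 2) is 1, DO_VAVGR(1, 2) is 2.
 */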
#define DO_VAVG(a, b)  ((a >> 1) + (b >> 1) + (a & b & 1))
#define DO_VAVGR(a, b) ((a >> 1) + (b >> 1) + ((a | b) & 1))

#define DO_3OP(NAME, BIT, E, DO_OP)                            \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i));                  \
    }                                                          \
}

DO_3OP(vavg_b, 8, B, DO_VAVG)
DO_3OP(vavg_h, 16, H, DO_VAVG)
DO_3OP(vavg_w, 32, W, DO_VAVG)
DO_3OP(vavg_d, 64, D, DO_VAVG)
DO_3OP(vavgr_b, 8, B, DO_VAVGR)
DO_3OP(vavgr_h, 16, H, DO_VAVGR)
DO_3OP(vavgr_w, 32, W, DO_VAVGR)
DO_3OP(vavgr_d, 64, D, DO_VAVGR)
DO_3OP(vavg_bu, 8, UB, DO_VAVG)
DO_3OP(vavg_hu, 16, UH, DO_VAVG)
DO_3OP(vavg_wu, 32, UW, DO_VAVG)
DO_3OP(vavg_du, 64, UD, DO_VAVG)
DO_3OP(vavgr_bu, 8, UB, DO_VAVGR)
DO_3OP(vavgr_hu, 16, UH, DO_VAVGR)
DO_3OP(vavgr_wu, 32, UW, DO_VAVGR)
DO_3OP(vavgr_du, 64, UD, DO_VAVGR)

#define DO_VABSD(a, b) ((a > b) ? (a - b) : (b - a))

DO_3OP(vabsd_b, 8, B, DO_VABSD)
DO_3OP(vabsd_h, 16, H, DO_VABSD)
DO_3OP(vabsd_w, 32, W, DO_VABSD)
DO_3OP(vabsd_d, 64, D, DO_VABSD)
DO_3OP(vabsd_bu, 8, UB, DO_VABSD)
DO_3OP(vabsd_hu, 16, UH, DO_VABSD)
DO_3OP(vabsd_wu, 32, UW, DO_VABSD)
DO_3OP(vabsd_du, 64, UD, DO_VABSD)

#define DO_VABS(a) ((a < 0) ? (-a) : (a))

#define DO_VADDA(NAME, BIT, E)                                 \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->E(i) = DO_VABS(Vj->E(i)) + DO_VABS(Vk->E(i));      \
    }                                                          \
}

DO_VADDA(vadda_b, 8, B)
DO_VADDA(vadda_h, 16, H)
DO_VADDA(vadda_w, 32, W)
DO_VADDA(vadda_d, 64, D)

#define DO_MIN(a, b) (a < b ? a : b)
#define DO_MAX(a, b) (a > b ? a : b)

#define VMINMAXI(NAME, BIT, E, DO_OP)                              \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
{                                                                  \
    int i;                                                         \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
    typedef __typeof(Vd->E(0)) TD;                                 \
    int oprsz = simd_oprsz(desc);                                  \
                                                                   \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                      \
        Vd->E(i) = DO_OP(Vj->E(i), (TD)imm);                       \
    }                                                              \
}

VMINMAXI(vmini_b, 8, B, DO_MIN)
VMINMAXI(vmini_h, 16, H, DO_MIN)
VMINMAXI(vmini_w, 32, W, DO_MIN)
VMINMAXI(vmini_d, 64, D, DO_MIN)
VMINMAXI(vmaxi_b, 8, B, DO_MAX)
VMINMAXI(vmaxi_h, 16, H, DO_MAX)
VMINMAXI(vmaxi_w, 32, W, DO_MAX)
VMINMAXI(vmaxi_d, 64, D, DO_MAX)
VMINMAXI(vmini_bu, 8, UB, DO_MIN)
VMINMAXI(vmini_hu, 16, UH, DO_MIN)
VMINMAXI(vmini_wu, 32, UW, DO_MIN)
VMINMAXI(vmini_du, 64, UD, DO_MIN)
VMINMAXI(vmaxi_bu, 8, UB, DO_MAX)
VMINMAXI(vmaxi_hu, 16, UH, DO_MAX)
VMINMAXI(vmaxi_wu, 32, UW, DO_MAX)
VMINMAXI(vmaxi_du, 64, UD, DO_MAX)

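/*
 * vmuh keeps the high half of the product.  For 8/16/32-bit elements
 * the multiply is done in the double-width type T and shifted right by
 * BIT; the DO_OP argument is unused by the macro body (DO_MUH is never
 * defined) and is seemingly only there for symmetry with the other
 * macro tables.  The 64-bit forms below use muls64/mulu64 instead.
 */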
#define DO_VMUH(NAME, BIT, E1, E2, DO_OP)                      \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    typedef __typeof(Vd->E1(0)) T;                             \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->E2(i) = ((T)Vj->E2(i)) * ((T)Vk->E2(i)) >> BIT;    \
    }                                                          \
}

void HELPER(vmuh_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    uint64_t l, h;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 8; i++) {
        muls64(&l, &h, Vj->D(i), Vk->D(i));
        Vd->D(i) = h;
    }
}

DO_VMUH(vmuh_b, 8, H, B, DO_MUH)
DO_VMUH(vmuh_h, 16, W, H, DO_MUH)
DO_VMUH(vmuh_w, 32, D, W, DO_MUH)

void HELPER(vmuh_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    uint64_t l, h;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 8; i++) {
        mulu64(&l, &h, Vj->D(i), Vk->D(i));
        Vd->D(i) = h;
    }
}

DO_VMUH(vmuh_bu, 8, UH, UB, DO_MUH)
DO_VMUH(vmuh_hu, 16, UW, UH, DO_MUH)
DO_VMUH(vmuh_wu, 32, UD, UW, DO_MUH)

#define DO_MUL(a, b) (a * b)

DO_EVEN(vmulwev_h_b, 16, H, B, DO_MUL)
DO_EVEN(vmulwev_w_h, 32, W, H, DO_MUL)
DO_EVEN(vmulwev_d_w, 64, D, W, DO_MUL)

DO_ODD(vmulwod_h_b, 16, H, B, DO_MUL)
DO_ODD(vmulwod_w_h, 32, W, H, DO_MUL)
DO_ODD(vmulwod_d_w, 64, D, W, DO_MUL)

DO_EVEN(vmulwev_h_bu, 16, UH, UB, DO_MUL)
DO_EVEN(vmulwev_w_hu, 32, UW, UH, DO_MUL)
DO_EVEN(vmulwev_d_wu, 64, UD, UW, DO_MUL)

DO_ODD(vmulwod_h_bu, 16, UH, UB, DO_MUL)
DO_ODD(vmulwod_w_hu, 32, UW, UH, DO_MUL)
DO_ODD(vmulwod_d_wu, 64, UD, UW, DO_MUL)

DO_EVEN_U_S(vmulwev_h_bu_b, 16, H, UH, B, UB, DO_MUL)
DO_EVEN_U_S(vmulwev_w_hu_h, 32, W, UW, H, UH, DO_MUL)
DO_EVEN_U_S(vmulwev_d_wu_w, 64, D, UD, W, UW, DO_MUL)

DO_ODD_U_S(vmulwod_h_bu_b, 16, H, UH, B, UB, DO_MUL)
DO_ODD_U_S(vmulwod_w_hu_h, 32, W, UW, H, UH, DO_MUL)
DO_ODD_U_S(vmulwod_d_wu_w, 64, D, UD, W, UW, DO_MUL)

#define DO_MADD(a, b, c) (a + b * c)
#define DO_MSUB(a, b, c) (a - b * c)

#define VMADDSUB(NAME, BIT, E, DO_OP)                          \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->E(i) = DO_OP(Vd->E(i), Vj->E(i), Vk->E(i));        \
    }                                                          \
}

VMADDSUB(vmadd_b, 8, B, DO_MADD)
VMADDSUB(vmadd_h, 16, H, DO_MADD)
VMADDSUB(vmadd_w, 32, W, DO_MADD)
VMADDSUB(vmadd_d, 64, D, DO_MADD)
VMADDSUB(vmsub_b, 8, B, DO_MSUB)
VMADDSUB(vmsub_h, 16, H, DO_MSUB)
VMADDSUB(vmsub_w, 32, W, DO_MSUB)
VMADDSUB(vmsub_d, 64, D, DO_MSUB)

#define VMADDWEV(NAME, BIT, E1, E2, DO_OP)                        \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)    \
{                                                                 \
    int i;                                                        \
    VReg *Vd = (VReg *)vd;                                        \
    VReg *Vj = (VReg *)vj;                                        \
    VReg *Vk = (VReg *)vk;                                        \
    typedef __typeof(Vd->E1(0)) TD;                               \
    int oprsz = simd_oprsz(desc);                                 \
                                                                  \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                     \
        Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i)); \
    }                                                             \
}

VMADDWEV(vmaddwev_h_b, 16, H, B, DO_MUL)
VMADDWEV(vmaddwev_w_h, 32, W, H, DO_MUL)
VMADDWEV(vmaddwev_d_w, 64, D, W, DO_MUL)
VMADDWEV(vmaddwev_h_bu, 16, UH, UB, DO_MUL)
VMADDWEV(vmaddwev_w_hu, 32, UW, UH, DO_MUL)
VMADDWEV(vmaddwev_d_wu, 64, UD, UW, DO_MUL)

#define VMADDWOD(NAME, BIT, E1, E2, DO_OP)                     \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    typedef __typeof(Vd->E1(0)) TD;                            \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i + 1),              \
                           (TD)Vk->E2(2 * i + 1));             \
    }                                                          \
}

VMADDWOD(vmaddwod_h_b, 16, H, B, DO_MUL)
VMADDWOD(vmaddwod_w_h, 32, W, H, DO_MUL)
VMADDWOD(vmaddwod_d_w, 64, D, W, DO_MUL)
VMADDWOD(vmaddwod_h_bu, 16, UH, UB, DO_MUL)
VMADDWOD(vmaddwod_w_hu, 32, UW, UH, DO_MUL)
VMADDWOD(vmaddwod_d_wu, 64, UD, UW, DO_MUL)

#define VMADDWEV_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)     \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    typedef __typeof(Vd->ES1(0)) TS1;                          \
    typedef __typeof(Vd->EU1(0)) TU1;                          \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i),               \
                            (TS1)Vk->ES2(2 * i));              \
    }                                                          \
}

VMADDWEV_U_S(vmaddwev_h_bu_b, 16, H, UH, B, UB, DO_MUL)
VMADDWEV_U_S(vmaddwev_w_hu_h, 32, W, UW, H, UH, DO_MUL)
VMADDWEV_U_S(vmaddwev_d_wu_w, 64, D, UD, W, UW, DO_MUL)

#define VMADDWOD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)     \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    typedef __typeof(Vd->ES1(0)) TS1;                          \
    typedef __typeof(Vd->EU1(0)) TU1;                          \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i + 1),           \
                            (TS1)Vk->ES2(2 * i + 1));          \
    }                                                          \
}

VMADDWOD_U_S(vmaddwod_h_bu_b, 16, H, UH, B, UB, DO_MUL)
VMADDWOD_U_S(vmaddwod_w_hu_h, 32, W, UW, H, UH, DO_MUL)
VMADDWOD_U_S(vmaddwod_d_wu_w, 64, D, UD, W, UW, DO_MUL)

#define DO_DIVU(N, M) (unlikely(M == 0) ? 0 : N / M)
#define DO_REMU(N, M) (unlikely(M == 0) ? 0 : N % M)
#define DO_DIV(N, M)  (unlikely(M == 0) ? 0 : \
        unlikely((N == -N) && (M == (__typeof(N))(-1))) ? N : N / M)
#define DO_REM(N, M)  (unlikely(M == 0) ? 0 : \
        unlikely((N == -N) && (M == (__typeof(N))(-1))) ? 0 : N % M)

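/*
 * Division guards: a zero divisor yields 0, and the signed overflow
 * case MIN / -1 yields MIN for DO_DIV and 0 for DO_REM.  (N == -N) is
 * true only for 0 and the minimum value of N's type, and for N == 0
 * the guarded result already equals the plain quotient or remainder.
 */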
#define VDIV(NAME, BIT, E, DO_OP)                              \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i));                  \
    }                                                          \
}

VDIV(vdiv_b, 8, B, DO_DIV)
VDIV(vdiv_h, 16, H, DO_DIV)
VDIV(vdiv_w, 32, W, DO_DIV)
VDIV(vdiv_d, 64, D, DO_DIV)
VDIV(vdiv_bu, 8, UB, DO_DIVU)
VDIV(vdiv_hu, 16, UH, DO_DIVU)
VDIV(vdiv_wu, 32, UW, DO_DIVU)
VDIV(vdiv_du, 64, UD, DO_DIVU)
VDIV(vmod_b, 8, B, DO_REM)
VDIV(vmod_h, 16, H, DO_REM)
VDIV(vmod_w, 32, W, DO_REM)
VDIV(vmod_d, 64, D, DO_REM)
VDIV(vmod_bu, 8, UB, DO_REMU)
VDIV(vmod_hu, 16, UH, DO_REMU)
VDIV(vmod_wu, 32, UW, DO_REMU)
VDIV(vmod_du, 64, UD, DO_REMU)

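/*
 * In the saturating helpers "max" is the upper saturation bound for
 * the element width (presumably precomputed from the immediate by the
 * translator).  For the signed case ~max is the matching lower bound,
 * so clamping to [~max, max] saturates into the target range.
 */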
#define VSAT_S(NAME, BIT, E)                                       \
void HELPER(NAME)(void *vd, void *vj, uint64_t max, uint32_t desc) \
{                                                                  \
    int i;                                                         \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
    typedef __typeof(Vd->E(0)) TD;                                 \
    int oprsz = simd_oprsz(desc);                                  \
                                                                   \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                      \
        Vd->E(i) = Vj->E(i) > (TD)max ? (TD)max :                  \
                   Vj->E(i) < (TD)~max ? (TD)~max : Vj->E(i);      \
    }                                                              \
}

VSAT_S(vsat_b, 8, B)
VSAT_S(vsat_h, 16, H)
VSAT_S(vsat_w, 32, W)
VSAT_S(vsat_d, 64, D)

#define VSAT_U(NAME, BIT, E)                                       \
void HELPER(NAME)(void *vd, void *vj, uint64_t max, uint32_t desc) \
{                                                                  \
    int i;                                                         \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
    typedef __typeof(Vd->E(0)) TD;                                 \
    int oprsz = simd_oprsz(desc);                                  \
                                                                   \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                      \
        Vd->E(i) = Vj->E(i) > (TD)max ? (TD)max : Vj->E(i);        \
    }                                                              \
}

VSAT_U(vsat_bu, 8, UB)
VSAT_U(vsat_hu, 16, UH)
VSAT_U(vsat_wu, 32, UW)
VSAT_U(vsat_du, 64, UD)

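/*
 * vexth widens the high half of each 128-bit lane: ofs = LSX_LEN / BIT
 * is the number of destination elements per lane, and the source index
 * j + ofs + ofs * 2 * i selects the upper ofs source elements of
 * lane i.
 */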
#define VEXTH(NAME, BIT, E1, E2)                                 \
void HELPER(NAME)(void *vd, void *vj, uint32_t desc)             \
{                                                                \
    int i, j, ofs;                                               \
    VReg *Vd = (VReg *)vd;                                       \
    VReg *Vj = (VReg *)vj;                                       \
    int oprsz = simd_oprsz(desc);                                \
                                                                 \
    ofs = LSX_LEN / BIT;                                         \
    for (i = 0; i < oprsz / 16; i++) {                           \
        for (j = 0; j < ofs; j++) {                              \
            Vd->E1(j + i * ofs) = Vj->E2(j + ofs + ofs * 2 * i); \
        }                                                        \
    }                                                            \
}

void HELPER(vexth_q_d)(void *vd, void *vj, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_makes64(Vj->D(2 * i + 1));
    }
}

void HELPER(vexth_qu_du)(void *vd, void *vj, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_make64(Vj->UD(2 * i + 1));
    }
}

VEXTH(vexth_h_b, 16, H, B)
VEXTH(vexth_w_h, 32, W, H)
VEXTH(vexth_d_w, 64, D, W)
VEXTH(vexth_hu_bu, 16, UH, UB)
VEXTH(vexth_wu_hu, 32, UW, UH)
VEXTH(vexth_du_wu, 64, UD, UW)

#define VEXT2XV(NAME, BIT, E1, E2)                   \
void HELPER(NAME)(void *vd, void *vj, uint32_t desc) \
{                                                    \
    int i;                                           \
    VReg temp = {};                                  \
    VReg *Vd = (VReg *)vd;                           \
    VReg *Vj = (VReg *)vj;                           \
    int oprsz = simd_oprsz(desc);                    \
                                                     \
    for (i = 0; i < oprsz / (BIT / 8); i++) {        \
        temp.E1(i) = Vj->E2(i);                      \
    }                                                \
    *Vd = temp;                                      \
}

VEXT2XV(vext2xv_h_b, 16, H, B)
VEXT2XV(vext2xv_w_b, 32, W, B)
VEXT2XV(vext2xv_d_b, 64, D, B)
VEXT2XV(vext2xv_w_h, 32, W, H)
VEXT2XV(vext2xv_d_h, 64, D, H)
VEXT2XV(vext2xv_d_w, 64, D, W)
VEXT2XV(vext2xv_hu_bu, 16, UH, UB)
VEXT2XV(vext2xv_wu_bu, 32, UW, UB)
VEXT2XV(vext2xv_du_bu, 64, UD, UB)
VEXT2XV(vext2xv_wu_hu, 32, UW, UH)
VEXT2XV(vext2xv_du_hu, 64, UD, UH)
VEXT2XV(vext2xv_du_wu, 64, UD, UW)

#define DO_SIGNCOV(a, b) (a == 0 ? 0 : a < 0 ? -b : b)

DO_3OP(vsigncov_b, 8, B, DO_SIGNCOV)
DO_3OP(vsigncov_h, 16, H, DO_SIGNCOV)
DO_3OP(vsigncov_w, 32, W, DO_SIGNCOV)
DO_3OP(vsigncov_d, 64, D, DO_SIGNCOV)

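/*
 * do_vmskltz_* gathers the sign bits of all elements packed in one
 * 64-bit chunk: mask off the sign bits, then fold them upward with
 * shifts (7/14/28 for bytes) so that the top byte accumulates one bit
 * per element, which c >> 56 finally extracts.
 */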
static uint64_t do_vmskltz_b(int64_t val)
{
    uint64_t m = 0x8080808080808080ULL;
    uint64_t c = val & m;
    c |= c << 7;
    c |= c << 14;
    c |= c << 28;
    return c >> 56;
}

void HELPER(vmskltz_b)(void *vd, void *vj, uint32_t desc)
{
    int i;
    uint16_t temp = 0;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        temp = 0;
        temp = do_vmskltz_b(Vj->D(2 * i));
        temp |= (do_vmskltz_b(Vj->D(2 * i + 1)) << 8);
        Vd->D(2 * i) = temp;
        Vd->D(2 * i + 1) = 0;
    }
}

static uint64_t do_vmskltz_h(int64_t val)
{
    uint64_t m = 0x8000800080008000ULL;
    uint64_t c = val & m;
    c |= c << 15;
    c |= c << 30;
    return c >> 60;
}

void HELPER(vmskltz_h)(void *vd, void *vj, uint32_t desc)
{
    int i;
    uint16_t temp = 0;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        temp = 0;
        temp = do_vmskltz_h(Vj->D(2 * i));
        temp |= (do_vmskltz_h(Vj->D(2 * i + 1)) << 4);
        Vd->D(2 * i) = temp;
        Vd->D(2 * i + 1) = 0;
    }
}

static uint64_t do_vmskltz_w(int64_t val)
{
    uint64_t m = 0x8000000080000000ULL;
    uint64_t c = val & m;
    c |= c << 31;
    return c >> 62;
}

void HELPER(vmskltz_w)(void *vd, void *vj, uint32_t desc)
{
    int i;
    uint16_t temp = 0;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        temp = 0;
        temp = do_vmskltz_w(Vj->D(2 * i));
        temp |= (do_vmskltz_w(Vj->D(2 * i + 1)) << 2);
        Vd->D(2 * i) = temp;
        Vd->D(2 * i + 1) = 0;
    }
}

static uint64_t do_vmskltz_d(int64_t val)
{
    return (uint64_t)val >> 63;
}

void HELPER(vmskltz_d)(void *vd, void *vj, uint32_t desc)
{
    int i;
    uint16_t temp = 0;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        temp = 0;
        temp = do_vmskltz_d(Vj->D(2 * i));
        temp |= (do_vmskltz_d(Vj->D(2 * i + 1)) << 1);
        Vd->D(2 * i) = temp;
        Vd->D(2 * i + 1) = 0;
    }
}

void HELPER(vmskgez_b)(void *vd, void *vj, uint32_t desc)
{
    int i;
    uint16_t temp = 0;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        temp = 0;
        temp = do_vmskltz_b(Vj->D(2 * i));
        temp |= (do_vmskltz_b(Vj->D(2 * i + 1)) << 8);
        Vd->D(2 * i) = (uint16_t)(~temp);
        Vd->D(2 * i + 1) = 0;
    }
}

static uint64_t do_vmskez_b(uint64_t a)
{
    uint64_t m = 0x7f7f7f7f7f7f7f7fULL;
    uint64_t c = ~(((a & m) + m) | a | m);
    c |= c << 7;
    c |= c << 14;
    c |= c << 28;
    return c >> 56;
}

void HELPER(vmsknz_b)(void *vd, void *vj, uint32_t desc)
{
    int i;
    uint16_t temp = 0;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        temp = 0;
        temp = do_vmskez_b(Vj->D(2 * i));
        temp |= (do_vmskez_b(Vj->D(2 * i + 1)) << 8);
        Vd->D(2 * i) = (uint16_t)(~temp);
        Vd->D(2 * i + 1) = 0;
    }
}

void HELPER(vnori_b)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    for (i = 0; i < simd_oprsz(desc); i++) {
        Vd->B(i) = ~(Vj->B(i) | (uint8_t)imm);
    }
}

#define VSLLWIL(NAME, BIT, E1, E2)                                             \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)             \
{                                                                              \
    int i, j, ofs;                                                             \
    VReg temp = {};                                                            \
    VReg *Vd = (VReg *)vd;                                                     \
    VReg *Vj = (VReg *)vj;                                                     \
    int oprsz = simd_oprsz(desc);                                              \
    typedef __typeof(temp.E1(0)) TD;                                           \
                                                                               \
    ofs = LSX_LEN / BIT;                                                       \
    for (i = 0; i < oprsz / 16; i++) {                                         \
        for (j = 0; j < ofs; j++) {                                            \
            temp.E1(j + ofs * i) = (TD)Vj->E2(j + ofs * 2 * i) << (imm % BIT); \
        }                                                                      \
    }                                                                          \
    *Vd = temp;                                                                \
}

void HELPER(vextl_q_d)(void *vd, void *vj, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_makes64(Vj->D(2 * i));
    }
}

void HELPER(vextl_qu_du)(void *vd, void *vj, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_make64(Vj->UD(2 * i));
    }
}

VSLLWIL(vsllwil_h_b, 16, H, B)
VSLLWIL(vsllwil_w_h, 32, W, H)
VSLLWIL(vsllwil_d_w, 64, D, W)
VSLLWIL(vsllwil_hu_bu, 16, UH, UB)
VSLLWIL(vsllwil_wu_hu, 32, UW, UH)
VSLLWIL(vsllwil_du_wu, 64, UD, UW)

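/*
 * Rounding right shifts: the result is the shifted value plus the most
 * significant bit shifted out ((s1 >> (sh - 1)) & 1), i.e. the shift
 * rounded to nearest with halves rounding up.  sh == 0 is special-cased
 * because s1 >> (sh - 1) would be undefined behaviour there.
 */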
#define do_vsrlr(E, T)                                \
static T do_vsrlr_ ##E(T s1, int sh)                  \
{                                                     \
    if (sh == 0) {                                    \
        return s1;                                    \
    } else {                                          \
        return (s1 >> sh) + ((s1 >> (sh - 1)) & 0x1); \
    }                                                 \
}

do_vsrlr(B, uint8_t)
do_vsrlr(H, uint16_t)
do_vsrlr(W, uint32_t)
do_vsrlr(D, uint64_t)

#define VSRLR(NAME, BIT, T, E)                                  \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)  \
{                                                               \
    int i;                                                      \
    VReg *Vd = (VReg *)vd;                                      \
    VReg *Vj = (VReg *)vj;                                      \
    VReg *Vk = (VReg *)vk;                                      \
    int oprsz = simd_oprsz(desc);                               \
                                                                \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                   \
        Vd->E(i) = do_vsrlr_ ## E(Vj->E(i), ((T)Vk->E(i)) % BIT); \
    }                                                           \
}

VSRLR(vsrlr_b, 8, uint8_t, B)
VSRLR(vsrlr_h, 16, uint16_t, H)
VSRLR(vsrlr_w, 32, uint32_t, W)
VSRLR(vsrlr_d, 64, uint64_t, D)

#define VSRLRI(NAME, BIT, E)                                       \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
{                                                                  \
    int i;                                                         \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
    int oprsz = simd_oprsz(desc);                                  \
                                                                   \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                      \
        Vd->E(i) = do_vsrlr_ ## E(Vj->E(i), imm);                  \
    }                                                              \
}

VSRLRI(vsrlri_b, 8, B)
VSRLRI(vsrlri_h, 16, H)
VSRLRI(vsrlri_w, 32, W)
VSRLRI(vsrlri_d, 64, D)

#define do_vsrar(E, T)                                \
static T do_vsrar_ ##E(T s1, int sh)                  \
{                                                     \
    if (sh == 0) {                                    \
        return s1;                                    \
    } else {                                          \
        return (s1 >> sh) + ((s1 >> (sh - 1)) & 0x1); \
    }                                                 \
}

do_vsrar(B, int8_t)
do_vsrar(H, int16_t)
do_vsrar(W, int32_t)
do_vsrar(D, int64_t)

#define VSRAR(NAME, BIT, T, E)                                  \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)  \
{                                                               \
    int i;                                                      \
    VReg *Vd = (VReg *)vd;                                      \
    VReg *Vj = (VReg *)vj;                                      \
    VReg *Vk = (VReg *)vk;                                      \
    int oprsz = simd_oprsz(desc);                               \
                                                                \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                   \
        Vd->E(i) = do_vsrar_ ## E(Vj->E(i), ((T)Vk->E(i)) % BIT); \
    }                                                           \
}

VSRAR(vsrar_b, 8, uint8_t, B)
VSRAR(vsrar_h, 16, uint16_t, H)
VSRAR(vsrar_w, 32, uint32_t, W)
VSRAR(vsrar_d, 64, uint64_t, D)

#define VSRARI(NAME, BIT, E)                                       \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
{                                                                  \
    int i;                                                         \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
    int oprsz = simd_oprsz(desc);                                  \
                                                                   \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                      \
        Vd->E(i) = do_vsrar_ ## E(Vj->E(i), imm);                  \
    }                                                              \
}

VSRARI(vsrari_b, 8, B)
VSRARI(vsrari_h, 16, H)
VSRARI(vsrari_w, 32, W)
VSRARI(vsrari_d, 64, D)

#define R_SHIFT(a, b) (a >> b)

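/*
 * Narrowing shifts: each 128-bit destination lane receives ofs
 * narrowed elements in its low half (indices j + ofs * 2 * i) and has
 * its high 64 bits cleared.  Variable shift amounts are taken modulo
 * BIT, the source element width.
 */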
#define VSRLN(NAME, BIT, E1, E2)                                          \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)            \
{                                                                         \
    int i, j, ofs;                                                        \
    VReg *Vd = (VReg *)vd;                                                \
    VReg *Vj = (VReg *)vj;                                                \
    VReg *Vk = (VReg *)vk;                                                \
    int oprsz = simd_oprsz(desc);                                         \
                                                                          \
    ofs = LSX_LEN / BIT;                                                  \
    for (i = 0; i < oprsz / 16; i++) {                                    \
        for (j = 0; j < ofs; j++) {                                       \
            Vd->E1(j + ofs * 2 * i) = R_SHIFT(Vj->E2(j + ofs * i),        \
                                              Vk->E2(j + ofs * i) % BIT); \
        }                                                                 \
        Vd->D(2 * i + 1) = 0;                                             \
    }                                                                     \
}

VSRLN(vsrln_b_h, 16, B, UH)
VSRLN(vsrln_h_w, 32, H, UW)
VSRLN(vsrln_w_d, 64, W, UD)

#define VSRAN(NAME, BIT, E1, E2, E3)                                      \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)            \
{                                                                         \
    int i, j, ofs;                                                        \
    VReg *Vd = (VReg *)vd;                                                \
    VReg *Vj = (VReg *)vj;                                                \
    VReg *Vk = (VReg *)vk;                                                \
    int oprsz = simd_oprsz(desc);                                         \
                                                                          \
    ofs = LSX_LEN / BIT;                                                  \
    for (i = 0; i < oprsz / 16; i++) {                                    \
        for (j = 0; j < ofs; j++) {                                       \
            Vd->E1(j + ofs * 2 * i) = R_SHIFT(Vj->E2(j + ofs * i),        \
                                              Vk->E3(j + ofs * i) % BIT); \
        }                                                                 \
        Vd->D(2 * i + 1) = 0;                                             \
    }                                                                     \
}

VSRAN(vsran_b_h, 16, B, H, UH)
VSRAN(vsran_h_w, 32, H, W, UW)
VSRAN(vsran_w_d, 64, W, D, UD)

#define VSRLNI(NAME, BIT, E1, E2)                                         \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)        \
{                                                                         \
    int i, j, ofs;                                                        \
    VReg temp = {};                                                       \
    VReg *Vd = (VReg *)vd;                                                \
    VReg *Vj = (VReg *)vj;                                                \
    int oprsz = simd_oprsz(desc);                                         \
                                                                          \
    ofs = LSX_LEN / BIT;                                                  \
    for (i = 0; i < oprsz / 16; i++) {                                    \
        for (j = 0; j < ofs; j++) {                                       \
            temp.E1(j + ofs * 2 * i) = R_SHIFT(Vj->E2(j + ofs * i), imm); \
            temp.E1(j + ofs * (2 * i + 1)) = R_SHIFT(Vd->E2(j + ofs * i), \
                                                     imm);                \
        }                                                                 \
    }                                                                     \
    *Vd = temp;                                                           \
}

void HELPER(vsrlni_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    int i;
    VReg temp = {};
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    for (i = 0; i < 2; i++) {
        temp.D(2 * i) = int128_getlo(int128_urshift(Vj->Q(i), imm % 128));
        temp.D(2 * i + 1) = int128_getlo(int128_urshift(Vd->Q(i), imm % 128));
    }
    *Vd = temp;
}

VSRLNI(vsrlni_b_h, 16, B, UH)
VSRLNI(vsrlni_h_w, 32, H, UW)
VSRLNI(vsrlni_w_d, 64, W, UD)

#define VSRANI(NAME, BIT, E1, E2)                                         \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)        \
{                                                                         \
    int i, j, ofs;                                                        \
    VReg temp = {};                                                       \
    VReg *Vd = (VReg *)vd;                                                \
    VReg *Vj = (VReg *)vj;                                                \
    int oprsz = simd_oprsz(desc);                                         \
                                                                          \
    ofs = LSX_LEN / BIT;                                                  \
    for (i = 0; i < oprsz / 16; i++) {                                    \
        for (j = 0; j < ofs; j++) {                                       \
            temp.E1(j + ofs * 2 * i) = R_SHIFT(Vj->E2(j + ofs * i), imm); \
            temp.E1(j + ofs * (2 * i + 1)) = R_SHIFT(Vd->E2(j + ofs * i), \
                                                     imm);                \
        }                                                                 \
    }                                                                     \
    *Vd = temp;                                                           \
}

void HELPER(vsrani_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    int i;
    VReg temp = {};
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    for (i = 0; i < 2; i++) {
        temp.D(2 * i) = int128_getlo(int128_rshift(Vj->Q(i), imm % 128));
        temp.D(2 * i + 1) = int128_getlo(int128_rshift(Vd->Q(i), imm % 128));
    }
    *Vd = temp;
}

VSRANI(vsrani_b_h, 16, B, H)
VSRANI(vsrani_h_w, 32, H, W)
VSRANI(vsrani_w_d, 64, W, D)

#define VSRLRN(NAME, BIT, E1, E2, E3)                                        \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)               \
{                                                                            \
    int i, j, ofs;                                                           \
    VReg *Vd = (VReg *)vd;                                                   \
    VReg *Vj = (VReg *)vj;                                                   \
    VReg *Vk = (VReg *)vk;                                                   \
    int oprsz = simd_oprsz(desc);                                            \
                                                                             \
    ofs = LSX_LEN / BIT;                                                     \
    for (i = 0; i < oprsz / 16; i++) {                                       \
        for (j = 0; j < ofs; j++) {                                          \
            Vd->E1(j + ofs * 2 * i) = do_vsrlr_ ##E2(Vj->E2(j + ofs * i),    \
                                                     Vk->E3(j + ofs * i) % BIT); \
        }                                                                    \
        Vd->D(2 * i + 1) = 0;                                                \
    }                                                                        \
}

VSRLRN(vsrlrn_b_h, 16, B, H, UH)
VSRLRN(vsrlrn_h_w, 32, H, W, UW)
VSRLRN(vsrlrn_w_d, 64, W, D, UD)

#define VSRARN(NAME, BIT, E1, E2, E3)                                        \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)               \
{                                                                            \
    int i, j, ofs;                                                           \
    VReg *Vd = (VReg *)vd;                                                   \
    VReg *Vj = (VReg *)vj;                                                   \
    VReg *Vk = (VReg *)vk;                                                   \
    int oprsz = simd_oprsz(desc);                                            \
                                                                             \
    ofs = LSX_LEN / BIT;                                                     \
    for (i = 0; i < oprsz / 16; i++) {                                       \
        for (j = 0; j < ofs; j++) {                                          \
            Vd->E1(j + ofs * 2 * i) = do_vsrar_ ## E2(Vj->E2(j + ofs * i),   \
                                                      Vk->E3(j + ofs * i) % BIT); \
        }                                                                    \
        Vd->D(2 * i + 1) = 0;                                                \
    }                                                                        \
}

VSRARN(vsrarn_b_h, 16, B, H, UH)
VSRARN(vsrarn_h_w, 32, H, W, UW)
VSRARN(vsrarn_w_d, 64, W, D, UD)

#define VSRLRNI(NAME, BIT, E1, E2)                                            \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)            \
{                                                                             \
    int i, j, ofs;                                                            \
    VReg temp = {};                                                           \
    VReg *Vd = (VReg *)vd;                                                    \
    VReg *Vj = (VReg *)vj;                                                    \
    int oprsz = simd_oprsz(desc);                                             \
                                                                              \
    ofs = LSX_LEN / BIT;                                                      \
    for (i = 0; i < oprsz / 16; i++) {                                        \
        for (j = 0; j < ofs; j++) {                                           \
            temp.E1(j + ofs * 2 * i) = do_vsrlr_ ## E2(Vj->E2(j + ofs * i), imm); \
            temp.E1(j + ofs * (2 * i + 1)) = do_vsrlr_ ## E2(Vd->E2(j + ofs * i), \
                                                             imm);            \
        }                                                                     \
    }                                                                         \
    *Vd = temp;                                                               \
}

void HELPER(vsrlrni_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    int i;
    VReg temp = {};
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    Int128 r[4];
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        if (imm == 0) {
            temp.D(2 * i) = int128_getlo(Vj->Q(i));
            temp.D(2 * i + 1) = int128_getlo(Vd->Q(i));
        } else {
            r[2 * i] = int128_and(int128_urshift(Vj->Q(i), (imm - 1)),
                                  int128_one());
            r[2 * i + 1] = int128_and(int128_urshift(Vd->Q(i), (imm - 1)),
                                      int128_one());
            temp.D(2 * i) = int128_getlo(int128_add(int128_urshift(Vj->Q(i),
                                                    imm), r[2 * i]));
            temp.D(2 * i + 1) = int128_getlo(int128_add(int128_urshift(Vd->Q(i),
                                                        imm), r[2 * i + 1]));
        }
    }
    *Vd = temp;
}

VSRLRNI(vsrlrni_b_h, 16, B, H)
VSRLRNI(vsrlrni_h_w, 32, H, W)
VSRLRNI(vsrlrni_w_d, 64, W, D)

#define VSRARNI(NAME, BIT, E1, E2)                                            \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)            \
{                                                                             \
    int i, j, ofs;                                                            \
    VReg temp = {};                                                           \
    VReg *Vd = (VReg *)vd;                                                    \
    VReg *Vj = (VReg *)vj;                                                    \
    int oprsz = simd_oprsz(desc);                                             \
                                                                              \
    ofs = LSX_LEN / BIT;                                                      \
    for (i = 0; i < oprsz / 16; i++) {                                        \
        for (j = 0; j < ofs; j++) {                                           \
            temp.E1(j + ofs * 2 * i) = do_vsrar_ ## E2(Vj->E2(j + ofs * i), imm); \
            temp.E1(j + ofs * (2 * i + 1)) = do_vsrar_ ## E2(Vd->E2(j + ofs * i), \
                                                             imm);            \
        }                                                                     \
    }                                                                         \
    *Vd = temp;                                                               \
}

void HELPER(vsrarni_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    int i;
    VReg temp = {};
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    Int128 r[4];
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        if (imm == 0) {
            temp.D(2 * i) = int128_getlo(Vj->Q(i));
            temp.D(2 * i + 1) = int128_getlo(Vd->Q(i));
        } else {
            r[2 * i] = int128_and(int128_rshift(Vj->Q(i), (imm - 1)),
                                  int128_one());
            r[2 * i + 1] = int128_and(int128_rshift(Vd->Q(i), (imm - 1)),
                                      int128_one());
            temp.D(2 * i) = int128_getlo(int128_add(int128_rshift(Vj->Q(i),
                                                    imm), r[2 * i]));
            temp.D(2 * i + 1) = int128_getlo(int128_add(int128_rshift(Vd->Q(i),
                                                        imm), r[2 * i + 1]));
        }
    }
    *Vd = temp;
}

VSRARNI(vsrarni_b_h, 16, B, H)
VSRARNI(vsrarni_h_w, 32, H, W)
VSRARNI(vsrarni_w_d, 64, W, D)

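/*
 * Saturating-narrowing shift primitives: sa is the shift amount and sh
 * the bit position of the saturation bound, so mask = (1 << sh) - 1 is
 * the largest value representable in the narrowed type (sh is
 * BIT / 2 - 1 for signed results and BIT / 2 for unsigned ones).
 */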
#define SSRLNS(NAME, T1, T2, T3)                    \
static T1 do_ssrlns_ ## NAME(T2 e2, int sa, int sh) \
{                                                   \
    T1 shft_res;                                    \
    if (sa == 0) {                                  \
        shft_res = e2;                              \
    } else {                                        \
        shft_res = (((T1)e2) >> sa);                \
    }                                               \
    T3 mask;                                        \
    mask = (1ull << sh) - 1;                        \
    if (shft_res > mask) {                          \
        return mask;                                \
    } else {                                        \
        return shft_res;                            \
    }                                               \
}

SSRLNS(B, uint16_t, int16_t, uint8_t)
SSRLNS(H, uint32_t, int32_t, uint16_t)
SSRLNS(W, uint64_t, int64_t, uint32_t)

#define VSSRLN(NAME, BIT, E1, E2, E3)                                       \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)              \
{                                                                           \
    int i, j, ofs;                                                          \
    VReg *Vd = (VReg *)vd;                                                  \
    VReg *Vj = (VReg *)vj;                                                  \
    VReg *Vk = (VReg *)vk;                                                  \
    int oprsz = simd_oprsz(desc);                                           \
                                                                            \
    ofs = LSX_LEN / BIT;                                                    \
    for (i = 0; i < oprsz / 16; i++) {                                      \
        for (j = 0; j < ofs; j++) {                                         \
            Vd->E1(j + ofs * 2 * i) = do_ssrlns_ ## E1(Vj->E2(j + ofs * i), \
                                                       Vk->E3(j + ofs * i) % BIT, \
                                                       BIT / 2 - 1);        \
        }                                                                   \
        Vd->D(2 * i + 1) = 0;                                               \
    }                                                                       \
}

VSSRLN(vssrln_b_h, 16, B, H, UH)
VSSRLN(vssrln_h_w, 32, H, W, UW)
VSSRLN(vssrln_w_d, 64, W, D, UD)

#define SSRANS(E, T1, T2)                        \
static T1 do_ssrans_ ## E(T1 e2, int sa, int sh) \
{                                                \
    T1 shft_res;                                 \
    if (sa == 0) {                               \
        shft_res = e2;                           \
    } else {                                     \
        shft_res = e2 >> sa;                     \
    }                                            \
    T2 mask;                                     \
    mask = (1ll << sh) - 1;                      \
    if (shft_res > mask) {                       \
        return mask;                             \
    } else if (shft_res < -(mask + 1)) {         \
        return ~mask;                            \
    } else {                                     \
        return shft_res;                         \
    }                                            \
}

SSRANS(B, int16_t, int8_t)
SSRANS(H, int32_t, int16_t)
SSRANS(W, int64_t, int32_t)

#define VSSRAN(NAME, BIT, E1, E2, E3)                                       \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)              \
{                                                                           \
    int i, j, ofs;                                                          \
    VReg *Vd = (VReg *)vd;                                                  \
    VReg *Vj = (VReg *)vj;                                                  \
    VReg *Vk = (VReg *)vk;                                                  \
    int oprsz = simd_oprsz(desc);                                           \
                                                                            \
    ofs = LSX_LEN / BIT;                                                    \
    for (i = 0; i < oprsz / 16; i++) {                                      \
        for (j = 0; j < ofs; j++) {                                         \
            Vd->E1(j + ofs * 2 * i) = do_ssrans_ ## E1(Vj->E2(j + ofs * i), \
                                                       Vk->E3(j + ofs * i) % BIT, \
                                                       BIT / 2 - 1);        \
        }                                                                   \
        Vd->D(2 * i + 1) = 0;                                               \
    }                                                                       \
}

VSSRAN(vssran_b_h, 16, B, H, UH)
VSSRAN(vssran_h_w, 32, H, W, UW)
VSSRAN(vssran_w_d, 64, W, D, UD)

#define SSRLNU(E, T1, T2, T3)                    \
static T1 do_ssrlnu_ ## E(T3 e2, int sa, int sh) \
{                                                \
    T1 shft_res;                                 \
    if (sa == 0) {                               \
        shft_res = e2;                           \
    } else {                                     \
        shft_res = (((T1)e2) >> sa);             \
    }                                            \
    T2 mask;                                     \
    mask = (1ull << sh) - 1;                     \
    if (shft_res > mask) {                       \
        return mask;                             \
    } else {                                     \
        return shft_res;                         \
    }                                            \
}

SSRLNU(B, uint16_t, uint8_t, int16_t)
SSRLNU(H, uint32_t, uint16_t, int32_t)
SSRLNU(W, uint64_t, uint32_t, int64_t)

#define VSSRLNU(NAME, BIT, E1, E2, E3)                                      \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)              \
{                                                                           \
    int i, j, ofs;                                                          \
    VReg *Vd = (VReg *)vd;                                                  \
    VReg *Vj = (VReg *)vj;                                                  \
    VReg *Vk = (VReg *)vk;                                                  \
    int oprsz = simd_oprsz(desc);                                           \
                                                                            \
    ofs = LSX_LEN / BIT;                                                    \
    for (i = 0; i < oprsz / 16; i++) {                                      \
        for (j = 0; j < ofs; j++) {                                         \
            Vd->E1(j + ofs * 2 * i) = do_ssrlnu_ ## E1(Vj->E2(j + ofs * i), \
                                                       Vk->E3(j + ofs * i) % BIT, \
                                                       BIT / 2);            \
        }                                                                   \
        Vd->D(2 * i + 1) = 0;                                               \
    }                                                                       \
}

VSSRLNU(vssrln_bu_h, 16, B, H, UH)
VSSRLNU(vssrln_hu_w, 32, H, W, UW)
VSSRLNU(vssrln_wu_d, 64, W, D, UD)

#define SSRANU(E, T1, T2, T3)                    \
static T1 do_ssranu_ ## E(T3 e2, int sa, int sh) \
{                                                \
    T1 shft_res;                                 \
    if (sa == 0) {                               \
        shft_res = e2;                           \
    } else {                                     \
        shft_res = e2 >> sa;                     \
    }                                            \
    if (e2 < 0) {                                \
        shft_res = 0;                            \
    }                                            \
    T2 mask;                                     \
    mask = (1ull << sh) - 1;                     \
    if (shft_res > mask) {                       \
        return mask;                             \
    } else {                                     \
        return shft_res;                         \
    }                                            \
}

SSRANU(B, uint16_t, uint8_t, int16_t)
SSRANU(H, uint32_t, uint16_t, int32_t)
SSRANU(W, uint64_t, uint32_t, int64_t)

#define VSSRANU(NAME, BIT, E1, E2, E3)                                      \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)              \
{                                                                           \
    int i, j, ofs;                                                          \
    VReg *Vd = (VReg *)vd;                                                  \
    VReg *Vj = (VReg *)vj;                                                  \
    VReg *Vk = (VReg *)vk;                                                  \
    int oprsz = simd_oprsz(desc);                                           \
                                                                            \
    ofs = LSX_LEN / BIT;                                                    \
    for (i = 0; i < oprsz / 16; i++) {                                      \
        for (j = 0; j < ofs; j++) {                                         \
            Vd->E1(j + ofs * 2 * i) = do_ssranu_ ## E1(Vj->E2(j + ofs * i), \
                                                       Vk->E3(j + ofs * i) % BIT, \
                                                       BIT / 2);            \
        }                                                                   \
        Vd->D(2 * i + 1) = 0;                                               \
    }                                                                       \
}

VSSRANU(vssran_bu_h, 16, B, H, UH)
VSSRANU(vssran_hu_w, 32, H, W, UW)
VSSRANU(vssran_wu_d, 64, W, D, UD)

#define VSSRLNI(NAME, BIT, E1, E2)                                            \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)            \
{                                                                             \
    int i, j, ofs;                                                            \
    VReg temp = {};                                                           \
    VReg *Vd = (VReg *)vd;                                                    \
    VReg *Vj = (VReg *)vj;                                                    \
    int oprsz = simd_oprsz(desc);                                             \
                                                                              \
    ofs = LSX_LEN / BIT;                                                      \
    for (i = 0; i < oprsz / 16; i++) {                                        \
        for (j = 0; j < ofs; j++) {                                           \
            temp.E1(j + ofs * 2 * i) = do_ssrlns_ ## E1(Vj->E2(j + ofs * i),  \
                                                        imm, BIT / 2 - 1);    \
            temp.E1(j + ofs * (2 * i + 1)) = do_ssrlns_ ## E1(Vd->E2(j + ofs * i), \
                                                              imm, BIT / 2 - 1); \
        }                                                                     \
    }                                                                         \
    *Vd = temp;                                                               \
}

static void do_vssrlni_q(VReg *Vd, VReg *Vj,
                         uint64_t imm, int idx, Int128 mask)
{
    Int128 shft_res1, shft_res2;

    if (imm == 0) {
        shft_res1 = Vj->Q(idx);
        shft_res2 = Vd->Q(idx);
    } else {
        shft_res1 = int128_urshift(Vj->Q(idx), imm);
        shft_res2 = int128_urshift(Vd->Q(idx), imm);
    }

    if (int128_ult(mask, shft_res1)) {
        Vd->D(idx * 2) = int128_getlo(mask);
    } else {
        Vd->D(idx * 2) = int128_getlo(shft_res1);
    }

    if (int128_ult(mask, shft_res2)) {
        Vd->D(idx * 2 + 1) = int128_getlo(mask);
    } else {
        Vd->D(idx * 2 + 1) = int128_getlo(shft_res2);
    }
}

void HELPER(vssrlni_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    int i;
    Int128 mask;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    mask = int128_sub(int128_lshift(int128_one(), 63), int128_one());

    for (i = 0; i < oprsz / 16; i++) {
        do_vssrlni_q(Vd, Vj, imm, i, mask);
    }
}

VSSRLNI(vssrlni_b_h, 16, B, H)
VSSRLNI(vssrlni_h_w, 32, H, W)
VSSRLNI(vssrlni_w_d, 64, W, D)

#define VSSRANI(NAME, BIT, E1, E2)                                            \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)            \
{                                                                             \
    int i, j, ofs;                                                            \
    VReg temp = {};                                                           \
    VReg *Vd = (VReg *)vd;                                                    \
    VReg *Vj = (VReg *)vj;                                                    \
    int oprsz = simd_oprsz(desc);                                             \
                                                                              \
    ofs = LSX_LEN / BIT;                                                      \
    for (i = 0; i < oprsz / 16; i++) {                                        \
        for (j = 0; j < ofs; j++) {                                           \
            temp.E1(j + ofs * 2 * i) = do_ssrans_ ## E1(Vj->E2(j + ofs * i),  \
                                                        imm, BIT / 2 - 1);    \
            temp.E1(j + ofs * (2 * i + 1)) = do_ssrans_ ## E1(Vd->E2(j + ofs * i), \
                                                              imm, BIT / 2 - 1); \
        }                                                                     \
    }                                                                         \
    *Vd = temp;                                                               \
}

static void do_vssrani_d_q(VReg *Vd, VReg *Vj,
                           uint64_t imm, int idx, Int128 mask, Int128 min)
{
    Int128 shft_res1, shft_res2;

    if (imm == 0) {
        shft_res1 = Vj->Q(idx);
        shft_res2 = Vd->Q(idx);
    } else {
        shft_res1 = int128_rshift(Vj->Q(idx), imm);
        shft_res2 = int128_rshift(Vd->Q(idx), imm);
    }

    if (int128_gt(shft_res1, mask)) {
        Vd->D(idx * 2) = int128_getlo(mask);
    } else if (int128_lt(shft_res1, int128_neg(min))) {
        Vd->D(idx * 2) = int128_getlo(min);
    } else {
        Vd->D(idx * 2) = int128_getlo(shft_res1);
    }

    if (int128_gt(shft_res2, mask)) {
        Vd->D(idx * 2 + 1) = int128_getlo(mask);
    } else if (int128_lt(shft_res2, int128_neg(min))) {
        Vd->D(idx * 2 + 1) = int128_getlo(min);
    } else {
        Vd->D(idx * 2 + 1) = int128_getlo(shft_res2);
    }
}

void HELPER(vssrani_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    int i;
    Int128 mask, min;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    mask = int128_sub(int128_lshift(int128_one(), 63), int128_one());
    min = int128_lshift(int128_one(), 63);

    for (i = 0; i < oprsz / 16; i++) {
        do_vssrani_d_q(Vd, Vj, imm, i, mask, min);
    }
}

VSSRANI(vssrani_b_h, 16, B, H)
VSSRANI(vssrani_h_w, 32, H, W)
VSSRANI(vssrani_w_d, 64, W, D)

#define VSSRLNUI(NAME, BIT, E1, E2)                                           \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)            \
{                                                                             \
    int i, j, ofs;                                                            \
    VReg temp = {};                                                           \
    VReg *Vd = (VReg *)vd;                                                    \
    VReg *Vj = (VReg *)vj;                                                    \
    int oprsz = simd_oprsz(desc);                                             \
                                                                              \
    ofs = LSX_LEN / BIT;                                                      \
    for (i = 0; i < oprsz / 16; i++) {                                        \
        for (j = 0; j < ofs; j++) {                                           \
            temp.E1(j + ofs * 2 * i) = do_ssrlnu_ ## E1(Vj->E2(j + ofs * i),  \
                                                        imm, BIT / 2);        \
            temp.E1(j + ofs * (2 * i + 1)) = do_ssrlnu_ ## E1(Vd->E2(j + ofs * i), \
                                                              imm, BIT / 2);  \
        }                                                                     \
    }                                                                         \
    *Vd = temp;                                                               \
}

void HELPER(vssrlni_du_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    int i;
    Int128 mask;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    mask = int128_sub(int128_lshift(int128_one(), 64), int128_one());

    for (i = 0; i < oprsz / 16; i++) {
        do_vssrlni_q(Vd, Vj, imm, i, mask);
    }
}

VSSRLNUI(vssrlni_bu_h, 16, B, H)
VSSRLNUI(vssrlni_hu_w, 32, H, W)
VSSRLNUI(vssrlni_wu_d, 64, W, D)

#define VSSRANUI(NAME, BIT, E1, E2)                                           \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)            \
{                                                                             \
    int i, j, ofs;                                                            \
    VReg temp = {};                                                           \
    VReg *Vd = (VReg *)vd;                                                    \
    VReg *Vj = (VReg *)vj;                                                    \
    int oprsz = simd_oprsz(desc);                                             \
                                                                              \
    ofs = LSX_LEN / BIT;                                                      \
    for (i = 0; i < oprsz / 16; i++) {                                        \
        for (j = 0; j < ofs; j++) {                                           \
            temp.E1(j + ofs * 2 * i) = do_ssranu_ ## E1(Vj->E2(j + ofs * i),  \
                                                        imm, BIT / 2);        \
            temp.E1(j + ofs * (2 * i + 1)) = do_ssranu_ ## E1(Vd->E2(j + ofs * i), \
                                                              imm, BIT / 2);  \
        }                                                                     \
    }                                                                         \
    *Vd = temp;                                                               \
}

static void do_vssrani_du_q(VReg *Vd, VReg *Vj,
                            uint64_t imm, int idx, Int128 mask)
{
    Int128 shft_res1, shft_res2;

    if (imm == 0) {
        shft_res1 = Vj->Q(idx);
        shft_res2 = Vd->Q(idx);
    } else {
        shft_res1 = int128_rshift(Vj->Q(idx), imm);
        shft_res2 = int128_rshift(Vd->Q(idx), imm);
    }

    if (int128_lt(Vj->Q(idx), int128_zero())) {
        shft_res1 = int128_zero();
    }

    if (int128_lt(Vd->Q(idx), int128_zero())) {
        shft_res2 = int128_zero();
    }

    if (int128_ult(mask, shft_res1)) {
        Vd->D(idx * 2) = int128_getlo(mask);
    } else {
        Vd->D(idx * 2) = int128_getlo(shft_res1);
    }

    if (int128_ult(mask, shft_res2)) {
        Vd->D(idx * 2 + 1) = int128_getlo(mask);
    } else {
        Vd->D(idx * 2 + 1) = int128_getlo(shft_res2);
    }
}

void HELPER(vssrani_du_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    int i;
    Int128 mask;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    mask = int128_sub(int128_lshift(int128_one(), 64), int128_one());

    for (i = 0; i < oprsz / 16; i++) {
        do_vssrani_du_q(Vd, Vj, imm, i, mask);
    }
}

VSSRANUI(vssrani_bu_h, 16, B, H)
VSSRANUI(vssrani_hu_w, 32, H, W)
VSSRANUI(vssrani_wu_d, 64, W, D)

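/*
 * The remaining helpers pair the rounding shifts (do_vsrlr_ and
 * do_vsrar_) with the same saturation bounds as above; the 128-bit
 * d_q forms redo the rounding explicitly with Int128 arithmetic.
 */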
1823 #define SSRLRNS(E1, E2, T1, T2, T3) \
1824 static T1 do_ssrlrns_ ## E1(T2 e2, int sa, int sh) \
1825 { \
1826 T1 shft_res; \
1827 \
1828 shft_res = do_vsrlr_ ## E2(e2, sa); \
1829 T1 mask; \
1830 mask = (1ull << sh) - 1; \
1831 if (shft_res > mask) { \
1832 return mask; \
1833 } else { \
1834 return shft_res; \
1835 } \
1836 }
1837
1838 SSRLRNS(B, H, uint16_t, int16_t, uint8_t)
1839 SSRLRNS(H, W, uint32_t, int32_t, uint16_t)
1840 SSRLRNS(W, D, uint64_t, int64_t, uint32_t)
1841
1842 #define VSSRLRN(NAME, BIT, E1, E2, E3) \
1843 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
1844 { \
1845 int i, j, ofs; \
1846 VReg *Vd = (VReg *)vd; \
1847 VReg *Vj = (VReg *)vj; \
1848 VReg *Vk = (VReg *)vk; \
1849 int oprsz = simd_oprsz(desc); \
1850 \
1851 ofs = LSX_LEN / BIT; \
1852 for (i = 0; i < oprsz / 16; i++) { \
1853 for (j = 0; j < ofs; j++) { \
1854 Vd->E1(j + ofs * 2 * i) = do_ssrlrns_ ## E1(Vj->E2(j + ofs * i), \
1855 Vk->E3(j + ofs * i) % BIT, \
1856 BIT / 2 - 1); \
1857 } \
1858 Vd->D(2 * i + 1) = 0; \
1859 } \
1860 }
1861
1862 VSSRLRN(vssrlrn_b_h, 16, B, H, UH)
1863 VSSRLRN(vssrlrn_h_w, 32, H, W, UW)
1864 VSSRLRN(vssrlrn_w_d, 64, W, D, UD)
1865
1866 #define SSRARNS(E1, E2, T1, T2) \
1867 static T1 do_ssrarns_ ## E1(T1 e2, int sa, int sh) \
1868 { \
1869 T1 shft_res; \
1870 \
1871 shft_res = do_vsrar_ ## E2(e2, sa); \
1872 T2 mask; \
1873 mask = (1ll << sh) - 1; \
1874 if (shft_res > mask) { \
1875 return mask; \
1876 } else if (shft_res < -(mask +1)) { \
1877 return ~mask; \
1878 } else { \
1879 return shft_res; \
1880 } \
1881 }
1882
1883 SSRARNS(B, H, int16_t, int8_t)
1884 SSRARNS(H, W, int32_t, int16_t)
1885 SSRARNS(W, D, int64_t, int32_t)
1886
1887 #define VSSRARN(NAME, BIT, E1, E2, E3) \
1888 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
1889 { \
1890 int i, j, ofs; \
1891 VReg *Vd = (VReg *)vd; \
1892 VReg *Vj = (VReg *)vj; \
1893 VReg *Vk = (VReg *)vk; \
1894 int oprsz = simd_oprsz(desc); \
1895 \
1896 ofs = LSX_LEN / BIT; \
1897 for (i = 0; i < oprsz / 16; i++) { \
1898 for (j = 0; j < ofs; j++) { \
1899 Vd->E1(j + ofs * 2 * i) = do_ssrarns_ ## E1(Vj->E2(j + ofs * i), \
1900 Vk->E3(j + ofs * i) % BIT, \
1901 BIT/ 2 - 1); \
1902 } \
1903 Vd->D(2 * i + 1) = 0; \
1904 } \
1905 }
1906
1907 VSSRARN(vssrarn_b_h, 16, B, H, UH)
1908 VSSRARN(vssrarn_h_w, 32, H, W, UW)
1909 VSSRARN(vssrarn_w_d, 64, W, D, UD)
1910
1911 #define SSRLRNU(E1, E2, T1, T2, T3) \
1912 static T1 do_ssrlrnu_ ## E1(T3 e2, int sa, int sh) \
1913 { \
1914 T1 shft_res; \
1915 \
1916 shft_res = do_vsrlr_ ## E2(e2, sa); \
1917 \
1918 T2 mask; \
1919 mask = (1ull << sh) - 1; \
1920 if (shft_res > mask) { \
1921 return mask; \
1922 } else { \
1923 return shft_res; \
1924 } \
1925 }
1926
1927 SSRLRNU(B, H, uint16_t, uint8_t, int16_t)
1928 SSRLRNU(H, W, uint32_t, uint16_t, int32_t)
1929 SSRLRNU(W, D, uint64_t, uint32_t, int64_t)
1930
1931 #define VSSRLRNU(NAME, BIT, E1, E2, E3) \
1932 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
1933 { \
1934 int i, j, ofs; \
1935 VReg *Vd = (VReg *)vd; \
1936 VReg *Vj = (VReg *)vj; \
1937 VReg *Vk = (VReg *)vk; \
1938 int oprsz = simd_oprsz(desc); \
1939 \
1940 ofs = LSX_LEN / BIT; \
1941 for (i = 0; i < oprsz / 16; i++) { \
1942 for (j = 0; j < ofs; j++) { \
1943 Vd->E1(j + ofs * 2 * i) = do_ssrlrnu_ ## E1(Vj->E2(j + ofs * i), \
1944 Vk->E3(j + ofs * i) % BIT, \
1945 BIT / 2); \
1946 } \
1947 Vd->D(2 * i + 1) = 0; \
1948 } \
1949 }
1950
1951 VSSRLRNU(vssrlrn_bu_h, 16, B, H, UH)
1952 VSSRLRNU(vssrlrn_hu_w, 32, H, W, UW)
1953 VSSRLRNU(vssrlrn_wu_d, 64, W, D, UD)
1954
1955 #define SSRARNU(E1, E2, T1, T2, T3) \
1956 static T1 do_ssrarnu_ ## E1(T3 e2, int sa, int sh) \
1957 { \
1958 T1 shft_res; \
1959 \
1960 if (e2 < 0) { \
1961 shft_res = 0; \
1962 } else { \
1963 shft_res = do_vsrar_ ## E2(e2, sa); \
1964 } \
1965 T2 mask; \
1966 mask = (1ull << sh) - 1; \
1967 if (shft_res > mask) { \
1968 return mask; \
1969 } else { \
1970 return shft_res; \
1971 } \
1972 }
1973
1974 SSRARNU(B, H, uint16_t, uint8_t, int16_t)
1975 SSRARNU(H, W, uint32_t, uint16_t, int32_t)
1976 SSRARNU(W, D, uint64_t, uint32_t, int64_t)
1977
1978 #define VSSRARNU(NAME, BIT, E1, E2, E3) \
1979 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
1980 { \
1981 int i, j, ofs; \
1982 VReg *Vd = (VReg *)vd; \
1983 VReg *Vj = (VReg *)vj; \
1984 VReg *Vk = (VReg *)vk; \
1985 int oprsz = simd_oprsz(desc); \
1986 \
1987 ofs = LSX_LEN / BIT; \
1988 for (i = 0; i < oprsz / 16; i++) { \
1989 for (j = 0; j < ofs; j++) { \
1990 Vd->E1(j + ofs * 2 * i) = do_ssrarnu_ ## E1(Vj->E2(j + ofs * i), \
1991 Vk->E3(j + ofs * i) % BIT, \
1992 BIT / 2); \
1993 } \
1994 Vd->D(2 * i + 1) = 0; \
1995 } \
1996 }
1997
1998 VSSRARNU(vssrarn_bu_h, 16, B, H, UH)
1999 VSSRARNU(vssrarn_hu_w, 32, H, W, UW)
2000 VSSRARNU(vssrarn_wu_d, 64, W, D, UD)
2001
2002 #define VSSRLRNI(NAME, BIT, E1, E2) \
2003 void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
2004 { \
2005 int i, j, ofs; \
2006 VReg temp = {}; \
2007 VReg *Vd = (VReg *)vd; \
2008 VReg *Vj = (VReg *)vj; \
2009 int oprsz = simd_oprsz(desc); \
2010 \
2011 ofs = LSX_LEN / BIT; \
2012 for (i = 0; i < oprsz / 16; i++) { \
2013 for (j = 0; j < ofs; j++) { \
2014 temp.E1(j + ofs * 2 * i) = do_ssrlrns_ ## E1(Vj->E2(j + ofs * i), \
2015 imm, BIT / 2 - 1); \
2016 temp.E1(j + ofs * (2 * i + 1)) = do_ssrlrns_ ## E1(Vd->E2(j + ofs * i), \
2017 imm, BIT / 2 - 1); \
2018 } \
2019 } \
2020 *Vd = temp; \
2021 }
2022
2023 static void do_vssrlrni_q(VReg *Vd, VReg * Vj,
2024 uint64_t imm, int idx, Int128 mask)
2025 {
2026 Int128 shft_res1, shft_res2, r1, r2;
2027 if (imm == 0) {
2028 shft_res1 = Vj->Q(idx);
2029 shft_res2 = Vd->Q(idx);
2030 } else {
2031 r1 = int128_and(int128_urshift(Vj->Q(idx), (imm - 1)), int128_one());
2032 r2 = int128_and(int128_urshift(Vd->Q(idx), (imm - 1)), int128_one());
2033 shft_res1 = (int128_add(int128_urshift(Vj->Q(idx), imm), r1));
2034 shft_res2 = (int128_add(int128_urshift(Vd->Q(idx), imm), r2));
2035 }
2036
2037 if (int128_ult(mask, shft_res1)) {
2038 Vd->D(idx * 2) = int128_getlo(mask);
2039 }else {
2040 Vd->D(idx * 2) = int128_getlo(shft_res1);
2041 }
2042
2043 if (int128_ult(mask, shft_res2)) {
2044 Vd->D(idx * 2 + 1) = int128_getlo(mask);
2045 } else {
2046 Vd->D(idx * 2 + 1) = int128_getlo(shft_res2);
2047 }
2048 }
2049
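/* The d_q form saturates against the signed 64-bit maximum, 2^63 - 1. */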
2050 void HELPER(vssrlrni_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
2051 {
2052 int i;
2053 Int128 mask;
2054 VReg *Vd = (VReg *)vd;
2055 VReg *Vj = (VReg *)vj;
2056 int oprsz = simd_oprsz(desc);
2057
2058 mask = int128_sub(int128_lshift(int128_one(), 63), int128_one());
2059
2060 for (i = 0; i < oprsz / 16; i++) {
2061 do_vssrlrni_q(Vd, Vj, imm, i, mask);
2062 }
2063 }
2064
2065 VSSRLRNI(vssrlrni_b_h, 16, B, H)
2066 VSSRLRNI(vssrlrni_h_w, 32, H, W)
2067 VSSRLRNI(vssrlrni_w_d, 64, W, D)
2068
2069 #define VSSRARNI(NAME, BIT, E1, E2) \
2070 void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
2071 { \
2072 int i, j, ofs; \
2073 VReg temp = {}; \
2074 VReg *Vd = (VReg *)vd; \
2075 VReg *Vj = (VReg *)vj; \
2076 int oprsz = simd_oprsz(desc); \
2077 \
2078 ofs = LSX_LEN / BIT; \
2079 for (i = 0; i < oprsz / 16; i++) { \
2080 for (j = 0; j < ofs; j++) { \
2081 temp.E1(j + ofs * 2 * i) = do_ssrarns_ ## E1(Vj->E2(j + ofs * i), \
2082 imm, BIT / 2 - 1); \
2083 temp.E1(j + ofs * (2 * i + 1)) = do_ssrarns_ ## E1(Vd->E2(j + ofs * i), \
2084 imm, BIT / 2 - 1); \
2085 } \
2086 } \
2087 *Vd = temp; \
2088 }
2089
2090 static void do_vssrarni_d_q(VReg *Vd, VReg *Vj,
2091 uint64_t imm, int idx, Int128 mask1, Int128 mask2)
2092 {
2093 Int128 shft_res1, shft_res2, r1, r2;
2094
2095 if (imm == 0) {
2096 shft_res1 = Vj->Q(idx);
2097 shft_res2 = Vd->Q(idx);
2098 } else {
2099 r1 = int128_and(int128_rshift(Vj->Q(idx), (imm - 1)), int128_one());
2100 r2 = int128_and(int128_rshift(Vd->Q(idx), (imm - 1)), int128_one());
2101 shft_res1 = int128_add(int128_rshift(Vj->Q(idx), imm), r1);
2102 shft_res2 = int128_add(int128_rshift(Vd->Q(idx), imm), r2);
2103 }
2104 if (int128_gt(shft_res1, mask1)) {
2105 Vd->D(idx * 2) = int128_getlo(mask1);
2106 } else if (int128_lt(shft_res1, int128_neg(mask2))) {
2107 Vd->D(idx * 2) = int128_getlo(mask2);
2108 } else {
2109 Vd->D(idx * 2) = int128_getlo(shft_res1);
2110 }
2111
2112 if (int128_gt(shft_res2, mask1)) {
2113 Vd->D(idx * 2 + 1) = int128_getlo(mask1);
2114 } else if (int128_lt(shft_res2, int128_neg(mask2))) {
2115 Vd->D(idx * 2 + 1) = int128_getlo(mask2);
2116 } else {
2117 Vd->D(idx * 2 + 1) = int128_getlo(shft_res2);
2118 }
2119 }
2120
2121 void HELPER(vssrarni_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
2122 {
2123 int i;
2124 Int128 mask1, mask2;
2125 VReg *Vd = (VReg *)vd;
2126 VReg *Vj = (VReg *)vj;
2127 int oprsz = simd_oprsz(desc);
2128
2129 mask1 = int128_sub(int128_lshift(int128_one(), 63), int128_one());
2130 mask2 = int128_lshift(int128_one(), 63);
2131
2132 for (i = 0; i < oprsz / 16; i++) {
2133 do_vssrarni_d_q(Vd, Vj, imm, i, mask1, mask2);
2134 }
2135 }
2136
2137 VSSRARNI(vssrarni_b_h, 16, B, H)
2138 VSSRARNI(vssrarni_h_w, 32, H, W)
2139 VSSRARNI(vssrarni_w_d, 64, W, D)
2140
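/*
 * Unsigned-destination immediate forms: the saturation bound widens
 * to BIT / 2 bits since the narrowed element is unsigned.
 */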
2141 #define VSSRLRNUI(NAME, BIT, E1, E2) \
2142 void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
2143 { \
2144 int i, j, ofs; \
2145 VReg temp = {}; \
2146 VReg *Vd = (VReg *)vd; \
2147 VReg *Vj = (VReg *)vj; \
2148 int oprsz = simd_oprsz(desc); \
2149 \
2150 ofs = LSX_LEN / BIT; \
2151 for (i = 0; i < oprsz / 16; i++) { \
2152 for (j = 0; j < ofs; j++) { \
2153 temp.E1(j + ofs * 2 * i) = do_ssrlrnu_ ## E1(Vj->E2(j + ofs * i), \
2154 imm, BIT / 2); \
2155 temp.E1(j + ofs * (2 * i + 1)) = do_ssrlrnu_ ## E1(Vd->E2(j + ofs * i), \
2156 imm, BIT / 2); \
2157 } \
2158 } \
2159 *Vd = temp; \
2160 }
2161
2162 void HELPER(vssrlrni_du_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
2163 {
2164 int i;
2165 Int128 mask;
2166 VReg *Vd = (VReg *)vd;
2167 VReg *Vj = (VReg *)vj;
2168 int oprsz = simd_oprsz(desc);
2169
2170 mask = int128_sub(int128_lshift(int128_one(), 64), int128_one());
2171
2172 for (i = 0; i < oprsz / 16; i++) {
2173 do_vssrlrni_q(Vd, Vj, imm, i, mask);
2174 }
2175 }
2176
2177 VSSRLRNUI(vssrlrni_bu_h, 16, B, H)
2178 VSSRLRNUI(vssrlrni_hu_w, 32, H, W)
2179 VSSRLRNUI(vssrlrni_wu_d, 64, W, D)
2180
2181 #define VSSRARNUI(NAME, BIT, E1, E2) \
2182 void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
2183 { \
2184 int i, j, ofs; \
2185 VReg temp = {}; \
2186 VReg *Vd = (VReg *)vd; \
2187 VReg *Vj = (VReg *)vj; \
2188 int oprsz = simd_oprsz(desc); \
2189 \
2190 ofs = LSX_LEN / BIT; \
2191 for (i = 0; i < oprsz / 16; i++) { \
2192 for (j = 0; j < ofs; j++) { \
2193 temp.E1(j + ofs * 2 * i) = do_ssrarnu_ ## E1(Vj->E2(j + ofs * i), \
2194 imm, BIT / 2); \
2195 temp.E1(j + ofs * (2 * i + 1)) = do_ssrarnu_ ## E1(Vd->E2(j + ofs * i), \
2196 imm, BIT / 2); \
2197 } \
2198 } \
2199 *Vd = temp; \
2200 }
2201
2202 static void do_vssrarni_du_q(VReg *Vd, VReg *Vj,
2203 uint64_t imm, int idx, Int128 mask1, Int128 mask2)
2204 {
2205 Int128 shft_res1, shft_res2, r1, r2;
2206
2207 if (imm == 0) {
2208 shft_res1 = Vj->Q(idx);
2209 shft_res2 = Vd->Q(idx);
2210 } else {
2211 r1 = int128_and(int128_rshift(Vj->Q(idx), (imm - 1)), int128_one());
2212 r2 = int128_and(int128_rshift(Vd->Q(idx), (imm - 1)), int128_one());
2213 shft_res1 = int128_add(int128_rshift(Vj->Q(idx), imm), r1);
2214 shft_res2 = int128_add(int128_rshift(Vd->Q(idx), imm), r2);
2215 }
2216
2217 if (int128_lt(Vj->Q(idx), int128_zero())) {
2218 shft_res1 = int128_zero();
2219 }
2220 if (int128_lt(Vd->Q(idx), int128_zero())) {
2221 shft_res2 = int128_zero();
2222 }
2223
2224 if (int128_gt(shft_res1, mask1)) {
2225 Vd->D(idx * 2) = int128_getlo(mask1);
2226 } else if (int128_lt(shft_res1, int128_neg(mask2))) {
2227 Vd->D(idx * 2) = int128_getlo(mask2);
2228 } else {
2229 Vd->D(idx * 2) = int128_getlo(shft_res1);
2230 }
2231
2232 if (int128_gt(shft_res2, mask1)) {
2233 Vd->D(idx * 2 + 1) = int128_getlo(mask1);
2234 } else if (int128_lt(shft_res2, int128_neg(mask2))) {
2235 Vd->D(idx * 2 + 1) = int128_getlo(mask2);
2236 } else {
2237 Vd->D(idx * 2 + 1) = int128_getlo(shft_res2);
2238 }
2239 }
2240
2241 void HELPER(vssrarni_du_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
2242 {
2243 int i;
2244 Int128 mask1, mask2;
2245 VReg *Vd = (VReg *)vd;
2246 VReg *Vj = (VReg *)vj;
2247 int oprsz = simd_oprsz(desc);
2248
2249 mask1 = int128_sub(int128_lshift(int128_one(), 64), int128_one());
2250 mask2 = int128_lshift(int128_one(), 64);
2251
2252 for (i = 0; i < oprsz / 16; i++) {
2253 do_vssrarni_du_q(Vd, Vj, imm, i, mask1, mask2);
2254 }
2255 }
2256
2257 VSSRARNUI(vssrarni_bu_h, 16, B, H)
2258 VSSRARNUI(vssrarni_hu_w, 32, H, W)
2259 VSSRARNUI(vssrarni_wu_d, 64, W, D)
2260
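/*
 * Unary element-wise helpers.  The CLO/CLZ macros widen the element
 * to 32 (or 64) bits first, so the byte and halfword forms subtract
 * the 24 or 16 container bits that always read as leading zeros,
 * e.g. DO_CLZ_B(0x10) = clz32(0x10) - 24 = 27 - 24 = 3.
 */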
2261 #define DO_2OP(NAME, BIT, E, DO_OP) \
2262 void HELPER(NAME)(void *vd, void *vj, uint32_t desc) \
2263 { \
2264 int i; \
2265 VReg *Vd = (VReg *)vd; \
2266 VReg *Vj = (VReg *)vj; \
2267 int oprsz = simd_oprsz(desc); \
2268 \
2269 for (i = 0; i < oprsz / (BIT / 8); i++) { \
2271 Vd->E(i) = DO_OP(Vj->E(i)); \
2272 } \
2273 }
2274
2275 #define DO_CLO_B(N) (clz32(~N & 0xff) - 24)
2276 #define DO_CLO_H(N) (clz32(~N & 0xffff) - 16)
2277 #define DO_CLO_W(N) (clz32(~N))
2278 #define DO_CLO_D(N) (clz64(~N))
2279 #define DO_CLZ_B(N) (clz32(N) - 24)
2280 #define DO_CLZ_H(N) (clz32(N) - 16)
2281 #define DO_CLZ_W(N) (clz32(N))
2282 #define DO_CLZ_D(N) (clz64(N))
2283
2284 DO_2OP(vclo_b, 8, UB, DO_CLO_B)
2285 DO_2OP(vclo_h, 16, UH, DO_CLO_H)
2286 DO_2OP(vclo_w, 32, UW, DO_CLO_W)
2287 DO_2OP(vclo_d, 64, UD, DO_CLO_D)
2288 DO_2OP(vclz_b, 8, UB, DO_CLZ_B)
2289 DO_2OP(vclz_h, 16, UH, DO_CLZ_H)
2290 DO_2OP(vclz_w, 32, UW, DO_CLZ_W)
2291 DO_2OP(vclz_d, 64, UD, DO_CLZ_D)
2292
2293 #define VPCNT(NAME, BIT, E, FN) \
2294 void HELPER(NAME)(void *vd, void *vj, uint32_t desc) \
2295 { \
2296 int i; \
2297 VReg *Vd = (VReg *)vd; \
2298 VReg *Vj = (VReg *)vj; \
2299 int oprsz = simd_oprsz(desc); \
2300 \
2301 for (i = 0; i < oprsz / (BIT / 8); i++) { \
2303 Vd->E(i) = FN(Vj->E(i)); \
2304 } \
2305 }
2306
2307 VPCNT(vpcnt_b, 8, UB, ctpop8)
2308 VPCNT(vpcnt_h, 16, UH, ctpop16)
2309 VPCNT(vpcnt_w, 32, UW, ctpop32)
2310 VPCNT(vpcnt_d, 64, UD, ctpop64)
2311
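/*
 * Single-bit clear/set/flip.  The register forms reduce the bit index
 * modulo the element width; the immediate forms use imm directly,
 * which presumably is already range-limited by the encoding.
 */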
2312 #define DO_BITCLR(a, bit) ((a) & ~(1ull << (bit)))
2313 #define DO_BITSET(a, bit) ((a) | (1ull << (bit)))
2314 #define DO_BITREV(a, bit) ((a) ^ (1ull << (bit)))
2315
2316 #define DO_BIT(NAME, BIT, E, DO_OP) \
2317 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
2318 { \
2319 int i; \
2320 VReg *Vd = (VReg *)vd; \
2321 VReg *Vj = (VReg *)vj; \
2322 VReg *Vk = (VReg *)vk; \
2323 int oprsz = simd_oprsz(desc); \
2324 \
2325 for (i = 0; i < oprsz / (BIT / 8); i++) { \
2326 Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i) % BIT); \
2327 } \
2328 }
2329
2330 DO_BIT(vbitclr_b, 8, UB, DO_BITCLR)
2331 DO_BIT(vbitclr_h, 16, UH, DO_BITCLR)
2332 DO_BIT(vbitclr_w, 32, UW, DO_BITCLR)
2333 DO_BIT(vbitclr_d, 64, UD, DO_BITCLR)
2334 DO_BIT(vbitset_b, 8, UB, DO_BITSET)
2335 DO_BIT(vbitset_h, 16, UH, DO_BITSET)
2336 DO_BIT(vbitset_w, 32, UW, DO_BITSET)
2337 DO_BIT(vbitset_d, 64, UD, DO_BITSET)
2338 DO_BIT(vbitrev_b, 8, UB, DO_BITREV)
2339 DO_BIT(vbitrev_h, 16, UH, DO_BITREV)
2340 DO_BIT(vbitrev_w, 32, UW, DO_BITREV)
2341 DO_BIT(vbitrev_d, 64, UD, DO_BITREV)
2342
2343 #define DO_BITI(NAME, BIT, E, DO_OP) \
2344 void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
2345 { \
2346 int i; \
2347 VReg *Vd = (VReg *)vd; \
2348 VReg *Vj = (VReg *)vj; \
2349 int oprsz = simd_oprsz(desc); \
2350 \
2351 for (i = 0; i < oprsz / (BIT / 8); i++) { \
2352 Vd->E(i) = DO_OP(Vj->E(i), imm); \
2353 } \
2354 }
2355
2356 DO_BITI(vbitclri_b, 8, UB, DO_BITCLR)
2357 DO_BITI(vbitclri_h, 16, UH, DO_BITCLR)
2358 DO_BITI(vbitclri_w, 32, UW, DO_BITCLR)
2359 DO_BITI(vbitclri_d, 64, UD, DO_BITCLR)
2360 DO_BITI(vbitseti_b, 8, UB, DO_BITSET)
2361 DO_BITI(vbitseti_h, 16, UH, DO_BITSET)
2362 DO_BITI(vbitseti_w, 32, UW, DO_BITSET)
2363 DO_BITI(vbitseti_d, 64, UD, DO_BITSET)
2364 DO_BITI(vbitrevi_b, 8, UB, DO_BITREV)
2365 DO_BITI(vbitrevi_h, 16, UH, DO_BITREV)
2366 DO_BITI(vbitrevi_w, 32, UW, DO_BITREV)
2367 DO_BITI(vbitrevi_d, 64, UD, DO_BITREV)
2368
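/*
 * vfrstp: find the index of the first negative element of Vj (or the
 * element count if none is negative) and store it in the element of
 * Vd selected by Vk (or by the immediate).
 */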
2369 #define VFRSTP(NAME, BIT, MASK, E) \
2370 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
2371 { \
2372 int i, m; \
2373 VReg *Vd = (VReg *)vd; \
2374 VReg *Vj = (VReg *)vj; \
2375 VReg *Vk = (VReg *)vk; \
2376 \
2377 for (i = 0; i < LSX_LEN/BIT; i++) { \
2378 if (Vj->E(i) < 0) { \
2379 break; \
2380 } \
2381 } \
2382 m = Vk->E(0) & MASK; \
2383 Vd->E(m) = i; \
2384 }
2385
2386 VFRSTP(vfrstp_b, 8, 0xf, B)
2387 VFRSTP(vfrstp_h, 16, 0x7, H)
2388
2389 #define VFRSTPI(NAME, BIT, E) \
2390 void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
2391 { \
2392 int i, m; \
2393 VReg *Vd = (VReg *)vd; \
2394 VReg *Vj = (VReg *)vj; \
2395 \
2396 for (i = 0; i < LSX_LEN/BIT; i++) { \
2397 if (Vj->E(i) < 0) { \
2398 break; \
2399 } \
2400 } \
2401 m = imm % (LSX_LEN/BIT); \
2402 Vd->E(m) = i; \
2403 }
2404
2405 VFRSTPI(vfrstpi_b, 8, B)
2406 VFRSTPI(vfrstpi_h, 16, H)
2407
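/*
 * Fold the accumulated softfloat exception flags into FCSR0, raising
 * EXCCODE_FPE if an enabled exception occurred.  The mask argument
 * lets callers discard selected flags (e.g. vflogb ignores inexact).
 */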
2408 static void vec_update_fcsr0_mask(CPULoongArchState *env,
2409 uintptr_t pc, int mask)
2410 {
2411 int flags = get_float_exception_flags(&env->fp_status);
2412
2413 set_float_exception_flags(0, &env->fp_status);
2414
2415 flags &= ~mask;
2416
2417 if (flags) {
2418 flags = ieee_ex_to_loongarch(flags);
2419 UPDATE_FP_CAUSE(env->fcsr0, flags);
2420 }
2421
2422 if (GET_FP_ENABLES(env->fcsr0) & flags) {
2423 do_raise_exception(env, EXCCODE_FPE, pc);
2424 } else {
2425 UPDATE_FP_FLAGS(env->fcsr0, flags);
2426 }
2427 }
2428
2429 static void vec_update_fcsr0(CPULoongArchState *env, uintptr_t pc)
2430 {
2431 vec_update_fcsr0_mask(env, pc, 0);
2432 }
2433
2434 static inline void vec_clear_cause(CPULoongArchState *env)
2435 {
2436 SET_FP_CAUSE(env->fcsr0, 0);
2437 }
2438
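/*
 * Element-wise binary float operations.  Flags are folded into FCSR0
 * after every element, so a trapping exception is raised as soon as
 * the offending element has been processed.
 */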
2439 #define DO_3OP_F(NAME, BIT, E, FN) \
2440 void HELPER(NAME)(void *vd, void *vj, void *vk, \
2441 CPULoongArchState *env, uint32_t desc) \
2442 { \
2443 int i; \
2444 VReg *Vd = (VReg *)vd; \
2445 VReg *Vj = (VReg *)vj; \
2446 VReg *Vk = (VReg *)vk; \
2447 \
2448 vec_clear_cause(env); \
2449 for (i = 0; i < LSX_LEN/BIT; i++) { \
2450 Vd->E(i) = FN(Vj->E(i), Vk->E(i), &env->fp_status); \
2451 vec_update_fcsr0(env, GETPC()); \
2452 } \
2453 }
2454
2455 DO_3OP_F(vfadd_s, 32, UW, float32_add)
2456 DO_3OP_F(vfadd_d, 64, UD, float64_add)
2457 DO_3OP_F(vfsub_s, 32, UW, float32_sub)
2458 DO_3OP_F(vfsub_d, 64, UD, float64_sub)
2459 DO_3OP_F(vfmul_s, 32, UW, float32_mul)
2460 DO_3OP_F(vfmul_d, 64, UD, float64_mul)
2461 DO_3OP_F(vfdiv_s, 32, UW, float32_div)
2462 DO_3OP_F(vfdiv_d, 64, UD, float64_div)
2463 DO_3OP_F(vfmax_s, 32, UW, float32_maxnum)
2464 DO_3OP_F(vfmax_d, 64, UD, float64_maxnum)
2465 DO_3OP_F(vfmin_s, 32, UW, float32_minnum)
2466 DO_3OP_F(vfmin_d, 64, UD, float64_minnum)
2467 DO_3OP_F(vfmaxa_s, 32, UW, float32_maxnummag)
2468 DO_3OP_F(vfmaxa_d, 64, UD, float64_maxnummag)
2469 DO_3OP_F(vfmina_s, 32, UW, float32_minnummag)
2470 DO_3OP_F(vfmina_d, 64, UD, float64_minnummag)
2471
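/*
 * Fused multiply-add family.  The softfloat flags select the variant:
 * float_muladd_negate_c computes a * b - c, float_muladd_negate_result
 * computes -(a * b + c), and the combination -(a * b - c).
 */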
2472 #define DO_4OP_F(NAME, BIT, E, FN, flags) \
2473 void HELPER(NAME)(void *vd, void *vj, void *vk, void *va, \
2474 CPULoongArchState *env, uint32_t desc) \
2475 { \
2476 int i; \
2477 VReg *Vd = (VReg *)vd; \
2478 VReg *Vj = (VReg *)vj; \
2479 VReg *Vk = (VReg *)vk; \
2480 VReg *Va = (VReg *)va; \
2481 \
2482 vec_clear_cause(env); \
2483 for (i = 0; i < LSX_LEN/BIT; i++) { \
2484 Vd->E(i) = FN(Vj->E(i), Vk->E(i), Va->E(i), flags, &env->fp_status); \
2485 vec_update_fcsr0(env, GETPC()); \
2486 } \
2487 }
2488
2489 DO_4OP_F(vfmadd_s, 32, UW, float32_muladd, 0)
2490 DO_4OP_F(vfmadd_d, 64, UD, float64_muladd, 0)
2491 DO_4OP_F(vfmsub_s, 32, UW, float32_muladd, float_muladd_negate_c)
2492 DO_4OP_F(vfmsub_d, 64, UD, float64_muladd, float_muladd_negate_c)
2493 DO_4OP_F(vfnmadd_s, 32, UW, float32_muladd, float_muladd_negate_result)
2494 DO_4OP_F(vfnmadd_d, 64, UD, float64_muladd, float_muladd_negate_result)
2495 DO_4OP_F(vfnmsub_s, 32, UW, float32_muladd,
2496 float_muladd_negate_c | float_muladd_negate_result)
2497 DO_4OP_F(vfnmsub_d, 64, UD, float64_muladd,
2498 float_muladd_negate_c | float_muladd_negate_result)
2499
2500 #define DO_2OP_F(NAME, BIT, E, FN) \
2501 void HELPER(NAME)(void *vd, void *vj, \
2502 CPULoongArchState *env, uint32_t desc) \
2503 { \
2504 int i; \
2505 VReg *Vd = (VReg *)vd; \
2506 VReg *Vj = (VReg *)vj; \
2507 \
2508 vec_clear_cause(env); \
2509 for (i = 0; i < LSX_LEN/BIT; i++) { \
2510 Vd->E(i) = FN(env, Vj->E(i)); \
2511 } \
2512 }
2513
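/*
 * flogb computes floor(log2(x)): log2 and the round-to-int step both
 * run in round-down mode, and the inexact flag this raises for inputs
 * that are not powers of two is deliberately masked out.
 */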
2514 #define FLOGB(BIT, T) \
2515 static T do_flogb_## BIT(CPULoongArchState *env, T fj) \
2516 { \
2517 T fp, fd; \
2518 float_status *status = &env->fp_status; \
2519 FloatRoundMode old_mode = get_float_rounding_mode(status); \
2520 \
2521 set_float_rounding_mode(float_round_down, status); \
2522 fp = float ## BIT ##_log2(fj, status); \
2523 fd = float ## BIT ##_round_to_int(fp, status); \
2524 set_float_rounding_mode(old_mode, status); \
2525 vec_update_fcsr0_mask(env, GETPC(), float_flag_inexact); \
2526 return fd; \
2527 }
2528
2529 FLOGB(32, uint32_t)
2530 FLOGB(64, uint64_t)
2531
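/* Classification raises no IEEE exceptions, so no flag handling here. */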
2532 #define FCLASS(NAME, BIT, E, FN) \
2533 void HELPER(NAME)(void *vd, void *vj, \
2534 CPULoongArchState *env, uint32_t desc) \
2535 { \
2536 int i; \
2537 VReg *Vd = (VReg *)vd; \
2538 VReg *Vj = (VReg *)vj; \
2539 \
2540 for (i = 0; i < LSX_LEN/BIT; i++) { \
2541 Vd->E(i) = FN(env, Vj->E(i)); \
2542 } \
2543 }
2544
2545 FCLASS(vfclass_s, 32, UW, helper_fclass_s)
2546 FCLASS(vfclass_d, 64, UD, helper_fclass_d)
2547
2548 #define FSQRT(BIT, T) \
2549 static T do_fsqrt_## BIT(CPULoongArchState *env, T fj) \
2550 { \
2551 T fd; \
2552 fd = float ## BIT ##_sqrt(fj, &env->fp_status); \
2553 vec_update_fcsr0(env, GETPC()); \
2554 return fd; \
2555 }
2556
2557 FSQRT(32, uint32_t)
2558 FSQRT(64, uint64_t)
2559
2560 #define FRECIP(BIT, T) \
2561 static T do_frecip_## BIT(CPULoongArchState *env, T fj) \
2562 { \
2563 T fd; \
2564 fd = float ## BIT ##_div(float ## BIT ##_one, fj, &env->fp_status); \
2565 vec_update_fcsr0(env, GETPC()); \
2566 return fd; \
2567 }
2568
2569 FRECIP(32, uint32_t)
2570 FRECIP(64, uint64_t)
2571
2572 #define FRSQRT(BIT, T) \
2573 static T do_frsqrt_## BIT(CPULoongArchState *env, T fj) \
2574 { \
2575 T fd, fp; \
2576 fp = float ## BIT ##_sqrt(fj, &env->fp_status); \
2577 fd = float ## BIT ##_div(float ## BIT ##_one, fp, &env->fp_status); \
2578 vec_update_fcsr0(env, GETPC()); \
2579 return fd; \
2580 }
2581
2582 FRSQRT(32, uint32_t)
2583 FRSQRT(64, uint64_t)
2584
2585 DO_2OP_F(vflogb_s, 32, UW, do_flogb_32)
2586 DO_2OP_F(vflogb_d, 64, UD, do_flogb_64)
2587 DO_2OP_F(vfsqrt_s, 32, UW, do_fsqrt_32)
2588 DO_2OP_F(vfsqrt_d, 64, UD, do_fsqrt_64)
2589 DO_2OP_F(vfrecip_s, 32, UW, do_frecip_32)
2590 DO_2OP_F(vfrecip_d, 64, UD, do_frecip_64)
2591 DO_2OP_F(vfrsqrt_s, 32, UW, do_frsqrt_32)
2592 DO_2OP_F(vfrsqrt_d, 64, UD, do_frsqrt_64)
2593
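/*
 * Thin wrappers giving the four float-to-float conversions a common
 * shape; the boolean passed to the half-precision routines selects
 * IEEE half format rather than the alternative format softfloat also
 * supports.
 */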
2594 static uint32_t float16_cvt_float32(uint16_t h, float_status *status)
2595 {
2596 return float16_to_float32(h, true, status);
2597 }
2598 static uint64_t float32_cvt_float64(uint32_t s, float_status *status)
2599 {
2600 return float32_to_float64(s, status);
2601 }
2602
2603 static uint16_t float32_cvt_float16(uint32_t s, float_status *status)
2604 {
2605 return float32_to_float16(s, true, status);
2606 }
2607 static uint32_t float64_cvt_float32(uint64_t d, float_status *status)
2608 {
2609 return float64_to_float32(d, status);
2610 }
2611
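/*
 * vfcvtl widens the low half of the source vector and vfcvth the high
 * half (hence the i + 4 and i + 2 source indices); vfcvt narrows two
 * vectors, packing Vk into the low half of the result and Vj into the
 * high half.
 */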
2612 void HELPER(vfcvtl_s_h)(void *vd, void *vj,
2613 CPULoongArchState *env, uint32_t desc)
2614 {
2615 int i;
2616 VReg temp;
2617 VReg *Vd = (VReg *)vd;
2618 VReg *Vj = (VReg *)vj;
2619
2620 vec_clear_cause(env);
2621 for (i = 0; i < LSX_LEN/32; i++) {
2622 temp.UW(i) = float16_cvt_float32(Vj->UH(i), &env->fp_status);
2623 vec_update_fcsr0(env, GETPC());
2624 }
2625 *Vd = temp;
2626 }
2627
2628 void HELPER(vfcvtl_d_s)(void *vd, void *vj,
2629 CPULoongArchState *env, uint32_t desc)
2630 {
2631 int i;
2632 VReg temp;
2633 VReg *Vd = (VReg *)vd;
2634 VReg *Vj = (VReg *)vj;
2635
2636 vec_clear_cause(env);
2637 for (i = 0; i < LSX_LEN/64; i++) {
2638 temp.UD(i) = float32_cvt_float64(Vj->UW(i), &env->fp_status);
2639 vec_update_fcsr0(env, GETPC());
2640 }
2641 *Vd = temp;
2642 }
2643
2644 void HELPER(vfcvth_s_h)(void *vd, void *vj,
2645 CPULoongArchState *env, uint32_t desc)
2646 {
2647 int i;
2648 VReg temp;
2649 VReg *Vd = (VReg *)vd;
2650 VReg *Vj = (VReg *)vj;
2651
2652 vec_clear_cause(env);
2653 for (i = 0; i < LSX_LEN/32; i++) {
2654 temp.UW(i) = float16_cvt_float32(Vj->UH(i + 4), &env->fp_status);
2655 vec_update_fcsr0(env, GETPC());
2656 }
2657 *Vd = temp;
2658 }
2659
2660 void HELPER(vfcvth_d_s)(void *vd, void *vj,
2661 CPULoongArchState *env, uint32_t desc)
2662 {
2663 int i;
2664 VReg temp;
2665 VReg *Vd = (VReg *)vd;
2666 VReg *Vj = (VReg *)vj;
2667
2668 vec_clear_cause(env);
2669 for (i = 0; i < LSX_LEN/64; i++) {
2670 temp.UD(i) = float32_cvt_float64(Vj->UW(i + 2), &env->fp_status);
2671 vec_update_fcsr0(env, GETPC());
2672 }
2673 *Vd = temp;
2674 }
2675
2676 void HELPER(vfcvt_h_s)(void *vd, void *vj, void *vk,
2677 CPULoongArchState *env, uint32_t desc)
2678 {
2679 int i;
2680 VReg temp;
2681 VReg *Vd = (VReg *)vd;
2682 VReg *Vj = (VReg *)vj;
2683 VReg *Vk = (VReg *)vk;
2684
2685 vec_clear_cause(env);
2686 for (i = 0; i < LSX_LEN/32; i++) {
2687 temp.UH(i + 4) = float32_cvt_float16(Vj->UW(i), &env->fp_status);
2688 temp.UH(i) = float32_cvt_float16(Vk->UW(i), &env->fp_status);
2689 vec_update_fcsr0(env, GETPC());
2690 }
2691 *Vd = temp;
2692 }
2693
2694 void HELPER(vfcvt_s_d)(void *vd, void *vj, void *vk,
2695 CPULoongArchState *env, uint32_t desc)
2696 {
2697 int i;
2698 VReg temp;
2699 VReg *Vd = (VReg *)vd;
2700 VReg *Vj = (VReg *)vj;
2701 VReg *Vk = (VReg *)vk;
2702
2703 vec_clear_cause(env);
2704 for (i = 0; i < LSX_LEN/64; i++) {
2705 temp.UW(i + 2) = float64_cvt_float32(Vj->UD(i), &env->fp_status);
2706 temp.UW(i) = float64_cvt_float32(Vk->UD(i), &env->fp_status);
2707 vec_update_fcsr0(env, GETPC());
2708 }
2709 *Vd = temp;
2710 }
2711
2712 void HELPER(vfrint_s)(void *vd, void *vj,
2713 CPULoongArchState *env, uint32_t desc)
2714 {
2715 int i;
2716 VReg *Vd = (VReg *)vd;
2717 VReg *Vj = (VReg *)vj;
2718
2719 vec_clear_cause(env);
2720 for (i = 0; i < 4; i++) {
2721 Vd->W(i) = float32_round_to_int(Vj->UW(i), &env->fp_status);
2722 vec_update_fcsr0(env, GETPC());
2723 }
2724 }
2725
2726 void HELPER(vfrint_d)(void *vd, void *vj,
2727 CPULoongArchState *env, uint32_t desc)
2728 {
2729 int i;
2730 VReg *Vd = (VReg *)vd;
2731 VReg *Vj = (VReg *)vj;
2732
2733 vec_clear_cause(env);
2734 for (i = 0; i < 2; i++) {
2735 Vd->D(i) = float64_round_to_int(Vj->UD(i), &env->fp_status);
2736 vec_update_fcsr0(env, GETPC());
2737 }
2738 }
2739
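/*
 * vfrintr{ne,z,p,m} round to an integral value in a fixed rounding
 * mode, restoring the previous mode afterwards; plain vfrint above
 * uses whatever mode FCSR0 currently selects.
 */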
2740 #define FCVT_2OP(NAME, BIT, E, MODE) \
2741 void HELPER(NAME)(void *vd, void *vj, \
2742 CPULoongArchState *env, uint32_t desc) \
2743 { \
2744 int i; \
2745 VReg *Vd = (VReg *)vd; \
2746 VReg *Vj = (VReg *)vj; \
2747 \
2748 vec_clear_cause(env); \
2749 for (i = 0; i < LSX_LEN/BIT; i++) { \
2750 FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); \
2751 set_float_rounding_mode(MODE, &env->fp_status); \
2752 Vd->E(i) = float## BIT ## _round_to_int(Vj->E(i), &env->fp_status); \
2753 set_float_rounding_mode(old_mode, &env->fp_status); \
2754 vec_update_fcsr0(env, GETPC()); \
2755 } \
2756 }
2757
2758 FCVT_2OP(vfrintrne_s, 32, UW, float_round_nearest_even)
2759 FCVT_2OP(vfrintrne_d, 64, UD, float_round_nearest_even)
2760 FCVT_2OP(vfrintrz_s, 32, UW, float_round_to_zero)
2761 FCVT_2OP(vfrintrz_d, 64, UD, float_round_to_zero)
2762 FCVT_2OP(vfrintrp_s, 32, UW, float_round_up)
2763 FCVT_2OP(vfrintrp_d, 64, UD, float_round_up)
2764 FCVT_2OP(vfrintrm_s, 32, UW, float_round_down)
2765 FCVT_2OP(vfrintrm_d, 64, UD, float_round_down)
2766
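/*
 * Float-to-integer conversions.  DO_FTINT forces the result to zero
 * for NaN inputs (softfloat itself would return a saturated value
 * there), and FTINT wraps that in a temporary rounding-mode switch.
 */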
2767 #define FTINT(NAME, FMT1, FMT2, T1, T2, MODE) \
2768 static T2 do_ftint ## NAME(CPULoongArchState *env, T1 fj) \
2769 { \
2770 T2 fd; \
2771 FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); \
2772 \
2773 set_float_rounding_mode(MODE, &env->fp_status); \
2774 fd = do_## FMT1 ##_to_## FMT2(env, fj); \
2775 set_float_rounding_mode(old_mode, &env->fp_status); \
2776 return fd; \
2777 }
2778
2779 #define DO_FTINT(FMT1, FMT2, T1, T2) \
2780 static T2 do_## FMT1 ##_to_## FMT2(CPULoongArchState *env, T1 fj) \
2781 { \
2782 T2 fd; \
2783 \
2784 fd = FMT1 ##_to_## FMT2(fj, &env->fp_status); \
2785 if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { \
2786 if (FMT1 ##_is_any_nan(fj)) { \
2787 fd = 0; \
2788 } \
2789 } \
2790 vec_update_fcsr0(env, GETPC()); \
2791 return fd; \
2792 }
2793
2794 DO_FTINT(float32, int32, uint32_t, uint32_t)
2795 DO_FTINT(float64, int64, uint64_t, uint64_t)
2796 DO_FTINT(float32, uint32, uint32_t, uint32_t)
2797 DO_FTINT(float64, uint64, uint64_t, uint64_t)
2798 DO_FTINT(float64, int32, uint64_t, uint32_t)
2799 DO_FTINT(float32, int64, uint32_t, uint64_t)
2800
2801 FTINT(rne_w_s, float32, int32, uint32_t, uint32_t, float_round_nearest_even)
2802 FTINT(rne_l_d, float64, int64, uint64_t, uint64_t, float_round_nearest_even)
2803 FTINT(rp_w_s, float32, int32, uint32_t, uint32_t, float_round_up)
2804 FTINT(rp_l_d, float64, int64, uint64_t, uint64_t, float_round_up)
2805 FTINT(rz_w_s, float32, int32, uint32_t, uint32_t, float_round_to_zero)
2806 FTINT(rz_l_d, float64, int64, uint64_t, uint64_t, float_round_to_zero)
2807 FTINT(rm_w_s, float32, int32, uint32_t, uint32_t, float_round_down)
2808 FTINT(rm_l_d, float64, int64, uint64_t, uint64_t, float_round_down)
2809
2810 DO_2OP_F(vftintrne_w_s, 32, UW, do_ftintrne_w_s)
2811 DO_2OP_F(vftintrne_l_d, 64, UD, do_ftintrne_l_d)
2812 DO_2OP_F(vftintrp_w_s, 32, UW, do_ftintrp_w_s)
2813 DO_2OP_F(vftintrp_l_d, 64, UD, do_ftintrp_l_d)
2814 DO_2OP_F(vftintrz_w_s, 32, UW, do_ftintrz_w_s)
2815 DO_2OP_F(vftintrz_l_d, 64, UD, do_ftintrz_l_d)
2816 DO_2OP_F(vftintrm_w_s, 32, UW, do_ftintrm_w_s)
2817 DO_2OP_F(vftintrm_l_d, 64, UD, do_ftintrm_l_d)
2818 DO_2OP_F(vftint_w_s, 32, UW, do_float32_to_int32)
2819 DO_2OP_F(vftint_l_d, 64, UD, do_float64_to_int64)
2820
2821 FTINT(rz_wu_s, float32, uint32, uint32_t, uint32_t, float_round_to_zero)
2822 FTINT(rz_lu_d, float64, uint64, uint64_t, uint64_t, float_round_to_zero)
2823
2824 DO_2OP_F(vftintrz_wu_s, 32, UW, do_ftintrz_wu_s)
2825 DO_2OP_F(vftintrz_lu_d, 64, UD, do_ftintrz_lu_d)
2826 DO_2OP_F(vftint_wu_s, 32, UW, do_float32_to_uint32)
2827 DO_2OP_F(vftint_lu_d, 64, UD, do_float64_to_uint64)
2828
2829 FTINT(rm_w_d, float64, int32, uint64_t, uint32_t, float_round_down)
2830 FTINT(rp_w_d, float64, int32, uint64_t, uint32_t, float_round_up)
2831 FTINT(rz_w_d, float64, int32, uint64_t, uint32_t, float_round_to_zero)
2832 FTINT(rne_w_d, float64, int32, uint64_t, uint32_t, float_round_nearest_even)
2833
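/* Narrowing double->word: Vk fills the low words, Vj the high words. */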
2834 #define FTINT_W_D(NAME, FN) \
2835 void HELPER(NAME)(void *vd, void *vj, void *vk, \
2836 CPULoongArchState *env, uint32_t desc) \
2837 { \
2838 int i; \
2839 VReg temp; \
2840 VReg *Vd = (VReg *)vd; \
2841 VReg *Vj = (VReg *)vj; \
2842 VReg *Vk = (VReg *)vk; \
2843 \
2844 vec_clear_cause(env); \
2845 for (i = 0; i < 2; i++) { \
2846 temp.W(i + 2) = FN(env, Vj->UD(i)); \
2847 temp.W(i) = FN(env, Vk->UD(i)); \
2848 } \
2849 *Vd = temp; \
2850 }
2851
2852 FTINT_W_D(vftint_w_d, do_float64_to_int32)
2853 FTINT_W_D(vftintrm_w_d, do_ftintrm_w_d)
2854 FTINT_W_D(vftintrp_w_d, do_ftintrp_w_d)
2855 FTINT_W_D(vftintrz_w_d, do_ftintrz_w_d)
2856 FTINT_W_D(vftintrne_w_d, do_ftintrne_w_d)
2857
2858 FTINT(rml_l_s, float32, int64, uint32_t, uint64_t, float_round_down)
2859 FTINT(rpl_l_s, float32, int64, uint32_t, uint64_t, float_round_up)
2860 FTINT(rzl_l_s, float32, int64, uint32_t, uint64_t, float_round_to_zero)
2861 FTINT(rnel_l_s, float32, int64, uint32_t, uint64_t, float_round_nearest_even)
2862 FTINT(rmh_l_s, float32, int64, uint32_t, uint64_t, float_round_down)
2863 FTINT(rph_l_s, float32, int64, uint32_t, uint64_t, float_round_up)
2864 FTINT(rzh_l_s, float32, int64, uint32_t, uint64_t, float_round_to_zero)
2865 FTINT(rneh_l_s, float32, int64, uint32_t, uint64_t, float_round_nearest_even)
2866
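/*
 * The l forms convert the low two single-precision words of Vj to
 * 64-bit integers, the h forms the high two.
 */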
2867 #define FTINTL_L_S(NAME, FN) \
2868 void HELPER(NAME)(void *vd, void *vj, \
2869 CPULoongArchState *env, uint32_t desc) \
2870 { \
2871 int i; \
2872 VReg temp; \
2873 VReg *Vd = (VReg *)vd; \
2874 VReg *Vj = (VReg *)vj; \
2875 \
2876 vec_clear_cause(env); \
2877 for (i = 0; i < 2; i++) { \
2878 temp.D(i) = FN(env, Vj->UW(i)); \
2879 } \
2880 *Vd = temp; \
2881 }
2882
2883 FTINTL_L_S(vftintl_l_s, do_float32_to_int64)
2884 FTINTL_L_S(vftintrml_l_s, do_ftintrml_l_s)
2885 FTINTL_L_S(vftintrpl_l_s, do_ftintrpl_l_s)
2886 FTINTL_L_S(vftintrzl_l_s, do_ftintrzl_l_s)
2887 FTINTL_L_S(vftintrnel_l_s, do_ftintrnel_l_s)
2888
2889 #define FTINTH_L_S(NAME, FN) \
2890 void HELPER(NAME)(void *vd, void *vj, \
2891 CPULoongArchState *env, uint32_t desc) \
2892 { \
2893 int i; \
2894 VReg temp; \
2895 VReg *Vd = (VReg *)vd; \
2896 VReg *Vj = (VReg *)vj; \
2897 \
2898 vec_clear_cause(env); \
2899 for (i = 0; i < 2; i++) { \
2900 temp.D(i) = FN(env, Vj->UW(i + 2)); \
2901 } \
2902 *Vd = temp; \
2903 }
2904
2905 FTINTH_L_S(vftinth_l_s, do_float32_to_int64)
2906 FTINTH_L_S(vftintrmh_l_s, do_ftintrmh_l_s)
2907 FTINTH_L_S(vftintrph_l_s, do_ftintrph_l_s)
2908 FTINTH_L_S(vftintrzh_l_s, do_ftintrzh_l_s)
2909 FTINTH_L_S(vftintrneh_l_s, do_ftintrneh_l_s)
2910
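/*
 * Integer-to-float conversions need no NaN special-casing, only the
 * usual flag accumulation.
 */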
2911 #define FFINT(NAME, FMT1, FMT2, T1, T2) \
2912 static T2 do_ffint_ ## NAME(CPULoongArchState *env, T1 fj) \
2913 { \
2914 T2 fd; \
2915 \
2916 fd = FMT1 ##_to_## FMT2(fj, &env->fp_status); \
2917 vec_update_fcsr0(env, GETPC()); \
2918 return fd; \
2919 }
2920
2921 FFINT(s_w, int32, float32, int32_t, uint32_t)
2922 FFINT(d_l, int64, float64, int64_t, uint64_t)
2923 FFINT(s_wu, uint32, float32, uint32_t, uint32_t)
2924 FFINT(d_lu, uint64, float64, uint64_t, uint64_t)
2925
2926 DO_2OP_F(vffint_s_w, 32, W, do_ffint_s_w)
2927 DO_2OP_F(vffint_d_l, 64, D, do_ffint_d_l)
2928 DO_2OP_F(vffint_s_wu, 32, UW, do_ffint_s_wu)
2929 DO_2OP_F(vffint_d_lu, 64, UD, do_ffint_d_lu)
2930
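/*
 * vffintl/vffinth widen the low/high words to doubles; vffint_s_l
 * below narrows a Vj:Vk pair of 64-bit integers to singles, Vk into
 * the low words and Vj into the high words.
 */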
2931 void HELPER(vffintl_d_w)(void *vd, void *vj,
2932 CPULoongArchState *env, uint32_t desc)
2933 {
2934 int i;
2935 VReg temp;
2936 VReg *Vd = (VReg *)vd;
2937 VReg *Vj = (VReg *)vj;
2938
2939 vec_clear_cause(env);
2940 for (i = 0; i < 2; i++) {
2941 temp.D(i) = int32_to_float64(Vj->W(i), &env->fp_status);
2942 vec_update_fcsr0(env, GETPC());
2943 }
2944 *Vd = temp;
2945 }
2946
2947 void HELPER(vffinth_d_w)(void *vd, void *vj,
2948 CPULoongArchState *env, uint32_t desc)
2949 {
2950 int i;
2951 VReg temp;
2952 VReg *Vd = (VReg *)vd;
2953 VReg *Vj = (VReg *)vj;
2954
2955 vec_clear_cause(env);
2956 for (i = 0; i < 2; i++) {
2957 temp.D(i) = int32_to_float64(Vj->W(i + 2), &env->fp_status);
2958 vec_update_fcsr0(env, GETPC());
2959 }
2960 *Vd = temp;
2961 }
2962
2963 void HELPER(vffint_s_l)(void *vd, void *vj, void *vk,
2964 CPULoongArchState *env, uint32_t desc)
2965 {
2966 int i;
2967 VReg temp;
2968 VReg *Vd = (VReg *)vd;
2969 VReg *Vj = (VReg *)vj;
2970 VReg *Vk = (VReg *)vk;
2971
2972 vec_clear_cause(env);
2973 for (i = 0; i < 2; i++) {
2974 temp.W(i + 2) = int64_to_float32(Vj->D(i), &env->fp_status);
2975 temp.W(i) = int64_to_float32(Vk->D(i), &env->fp_status);
2976 vec_update_fcsr0(env, GETPC());
2977 }
2978 *Vd = temp;
2979 }
2980
2981 #define VSEQ(a, b) ((a) == (b) ? -1 : 0)
2982 #define VSLE(a, b) ((a) <= (b) ? -1 : 0)
2983 #define VSLT(a, b) ((a) < (b) ? -1 : 0)
2984
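/*
 * Integer compares return all-ones for true and all-zeroes for false,
 * so the result doubles as a select mask.  The 64-bit immediate is
 * cast to the element type before comparing.
 */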
2985 #define VCMPI(NAME, BIT, E, DO_OP) \
2986 void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v) \
2987 { \
2988 int i; \
2989 VReg *Vd = (VReg *)vd; \
2990 VReg *Vj = (VReg *)vj; \
2991 typedef __typeof(Vd->E(0)) TD; \
2992 \
2993 for (i = 0; i < LSX_LEN/BIT; i++) { \
2994 Vd->E(i) = DO_OP(Vj->E(i), (TD)imm); \
2995 } \
2996 }
2997
2998 VCMPI(vseqi_b, 8, B, VSEQ)
2999 VCMPI(vseqi_h, 16, H, VSEQ)
3000 VCMPI(vseqi_w, 32, W, VSEQ)
3001 VCMPI(vseqi_d, 64, D, VSEQ)
3002 VCMPI(vslei_b, 8, B, VSLE)
3003 VCMPI(vslei_h, 16, H, VSLE)
3004 VCMPI(vslei_w, 32, W, VSLE)
3005 VCMPI(vslei_d, 64, D, VSLE)
3006 VCMPI(vslei_bu, 8, UB, VSLE)
3007 VCMPI(vslei_hu, 16, UH, VSLE)
3008 VCMPI(vslei_wu, 32, UW, VSLE)
3009 VCMPI(vslei_du, 64, UD, VSLE)
3010 VCMPI(vslti_b, 8, B, VSLT)
3011 VCMPI(vslti_h, 16, H, VSLT)
3012 VCMPI(vslti_w, 32, W, VSLT)
3013 VCMPI(vslti_d, 64, D, VSLT)
3014 VCMPI(vslti_bu, 8, UB, VSLT)
3015 VCMPI(vslti_hu, 16, UH, VSLT)
3016 VCMPI(vslti_wu, 32, UW, VSLT)
3017 VCMPI(vslti_du, 64, UD, VSLT)
3018
3019 static uint64_t vfcmp_common(CPULoongArchState *env,
3020 FloatRelation cmp, uint32_t flags)
3021 {
3022 uint64_t ret = 0;
3023
3024 switch (cmp) {
3025 case float_relation_less:
3026 ret = (flags & FCMP_LT);
3027 break;
3028 case float_relation_equal:
3029 ret = (flags & FCMP_EQ);
3030 break;
3031 case float_relation_greater:
3032 ret = (flags & FCMP_GT);
3033 break;
3034 case float_relation_unordered:
3035 ret = (flags & FCMP_UN);
3036 break;
3037 default:
3038 g_assert_not_reached();
3039 }
3040
3041 if (ret) {
3042 ret = -1;
3043 }
3044
3045 return ret;
3046 }
3047
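/*
 * The flags operand encodes which relations count as true (FCMP_LT,
 * FCMP_EQ, FCMP_GT, FCMP_UN); the _s variants use signalling
 * compares, the _c variants quiet ones.
 */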
3048 #define VFCMP(NAME, BIT, E, FN) \
3049 void HELPER(NAME)(CPULoongArchState *env, \
3050 uint32_t vd, uint32_t vj, uint32_t vk, uint32_t flags) \
3051 { \
3052 int i; \
3053 VReg t; \
3054 VReg *Vd = &(env->fpr[vd].vreg); \
3055 VReg *Vj = &(env->fpr[vj].vreg); \
3056 VReg *Vk = &(env->fpr[vk].vreg); \
3057 \
3058 vec_clear_cause(env); \
3059 for (i = 0; i < LSX_LEN/BIT; i++) { \
3060 FloatRelation cmp; \
3061 cmp = FN(Vj->E(i), Vk->E(i), &env->fp_status); \
3062 t.E(i) = vfcmp_common(env, cmp, flags); \
3063 vec_update_fcsr0(env, GETPC()); \
3064 } \
3065 *Vd = t; \
3066 }
3067
3068 VFCMP(vfcmp_c_s, 32, UW, float32_compare_quiet)
3069 VFCMP(vfcmp_s_s, 32, UW, float32_compare)
3070 VFCMP(vfcmp_c_d, 64, UD, float64_compare_quiet)
3071 VFCMP(vfcmp_s_d, 64, UD, float64_compare)
3072
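/*
 * vbitseli.b: per-bit select controlled by the old Vd, taking the
 * immediate pattern where Vd has ones and Vj where it has zeros.
 */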
3073 void HELPER(vbitseli_b)(void *vd, void *vj, uint64_t imm, uint32_t v)
3074 {
3075 int i;
3076 VReg *Vd = (VReg *)vd;
3077 VReg *Vj = (VReg *)vj;
3078
3079 for (i = 0; i < 16; i++) {
3080 Vd->B(i) = (~Vd->B(i) & Vj->B(i)) | (Vd->B(i) & imm);
3081 }
3082 }
3083
3084 /* Copied from target/arm/tcg/sve_helper.c */
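/*
 * (x - ones) & ~x sets the sign bit of every lane of x that is zero,
 * so the OR of the two comparisons is nonzero iff some element of m0
 * or m1 equals n.
 */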
3085 static inline bool do_match2(uint64_t n, uint64_t m0, uint64_t m1, int esz)
3086 {
3087 uint64_t bits = 8 << esz;
3088 uint64_t ones = dup_const(esz, 1);
3089 uint64_t signs = ones << (bits - 1);
3090 uint64_t cmp0, cmp1;
3091
3092 cmp1 = dup_const(esz, n);
3093 cmp0 = cmp1 ^ m0;
3094 cmp1 = cmp1 ^ m1;
3095 cmp0 = (cmp0 - ones) & ~cmp0;
3096 cmp1 = (cmp1 - ones) & ~cmp1;
3097 return (cmp0 | cmp1) & signs;
3098 }
3099
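/*
 * setanyeqz sets flag cd when any element of Vj is zero; setallnez is
 * its negation.  do_match2 scans both 64-bit halves against n == 0.
 */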
3100 #define SETANYEQZ(NAME, MO) \
3101 void HELPER(NAME)(CPULoongArchState *env, uint32_t cd, uint32_t vj) \
3102 { \
3103 VReg *Vj = &(env->fpr[vj].vreg); \
3104 \
3105 env->cf[cd & 0x7] = do_match2(0, Vj->D(0), Vj->D(1), MO); \
3106 }
3107 SETANYEQZ(vsetanyeqz_b, MO_8)
3108 SETANYEQZ(vsetanyeqz_h, MO_16)
3109 SETANYEQZ(vsetanyeqz_w, MO_32)
3110 SETANYEQZ(vsetanyeqz_d, MO_64)
3111
3112 #define SETALLNEZ(NAME, MO) \
3113 void HELPER(NAME)(CPULoongArchState *env, uint32_t cd, uint32_t vj) \
3114 { \
3115 VReg *Vj = &(env->fpr[vj].vreg); \
3116 \
3117 env->cf[cd & 0x7] = !do_match2(0, Vj->D(0), Vj->D(1), MO); \
3118 }
3119 SETALLNEZ(vsetallnez_b, MO_8)
3120 SETALLNEZ(vsetallnez_h, MO_16)
3121 SETALLNEZ(vsetallnez_w, MO_32)
3122 SETALLNEZ(vsetallnez_d, MO_64)
3123
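/*
 * Permutations: vpackev/vpackod interleave the even (odd) elements of
 * Vk and Vj; vpickev/vpickod compress them instead, Vk into the low
 * half of the result and Vj into the high half; vilvl/vilvh
 * interleave the low (high) halves of the two sources.
 */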
3124 #define VPACKEV(NAME, BIT, E) \
3125 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
3126 { \
3127 int i; \
3128 VReg temp; \
3129 VReg *Vd = (VReg *)vd; \
3130 VReg *Vj = (VReg *)vj; \
3131 VReg *Vk = (VReg *)vk; \
3132 \
3133 for (i = 0; i < LSX_LEN/BIT; i++) { \
3134 temp.E(2 * i + 1) = Vj->E(2 * i); \
3135 temp.E(2 * i) = Vk->E(2 * i); \
3136 } \
3137 *Vd = temp; \
3138 }
3139
3140 VPACKEV(vpackev_b, 16, B)
3141 VPACKEV(vpackev_h, 32, H)
3142 VPACKEV(vpackev_w, 64, W)
3143 VPACKEV(vpackev_d, 128, D)
3144
3145 #define VPACKOD(NAME, BIT, E) \
3146 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
3147 { \
3148 int i; \
3149 VReg temp; \
3150 VReg *Vd = (VReg *)vd; \
3151 VReg *Vj = (VReg *)vj; \
3152 VReg *Vk = (VReg *)vk; \
3153 \
3154 for (i = 0; i < LSX_LEN/BIT; i++) { \
3155 temp.E(2 * i + 1) = Vj->E(2 * i + 1); \
3156 temp.E(2 * i) = Vk->E(2 * i + 1); \
3157 } \
3158 *Vd = temp; \
3159 }
3160
3161 VPACKOD(vpackod_b, 16, B)
3162 VPACKOD(vpackod_h, 32, H)
3163 VPACKOD(vpackod_w, 64, W)
3164 VPACKOD(vpackod_d, 128, D)
3165
3166 #define VPICKEV(NAME, BIT, E) \
3167 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
3168 { \
3169 int i; \
3170 VReg temp; \
3171 VReg *Vd = (VReg *)vd; \
3172 VReg *Vj = (VReg *)vj; \
3173 VReg *Vk = (VReg *)vk; \
3174 \
3175 for (i = 0; i < LSX_LEN/BIT; i++) { \
3176 temp.E(i + LSX_LEN/BIT) = Vj->E(2 * i); \
3177 temp.E(i) = Vk->E(2 * i); \
3178 } \
3179 *Vd = temp; \
3180 }
3181
3182 VPICKEV(vpickev_b, 16, B)
3183 VPICKEV(vpickev_h, 32, H)
3184 VPICKEV(vpickev_w, 64, W)
3185 VPICKEV(vpickev_d, 128, D)
3186
3187 #define VPICKOD(NAME, BIT, E) \
3188 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
3189 { \
3190 int i; \
3191 VReg temp; \
3192 VReg *Vd = (VReg *)vd; \
3193 VReg *Vj = (VReg *)vj; \
3194 VReg *Vk = (VReg *)vk; \
3195 \
3196 for (i = 0; i < LSX_LEN/BIT; i++) { \
3197 temp.E(i + LSX_LEN/BIT) = Vj->E(2 * i + 1); \
3198 temp.E(i) = Vk->E(2 * i + 1); \
3199 } \
3200 *Vd = temp; \
3201 }
3202
3203 VPICKOD(vpickod_b, 16, B)
3204 VPICKOD(vpickod_h, 32, H)
3205 VPICKOD(vpickod_w, 64, W)
3206 VPICKOD(vpickod_d, 128, D)
3207
3208 #define VILVL(NAME, BIT, E) \
3209 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
3210 { \
3211 int i; \
3212 VReg temp; \
3213 VReg *Vd = (VReg *)vd; \
3214 VReg *Vj = (VReg *)vj; \
3215 VReg *Vk = (VReg *)vk; \
3216 \
3217 for (i = 0; i < LSX_LEN/BIT; i++) { \
3218 temp.E(2 * i + 1) = Vj->E(i); \
3219 temp.E(2 * i) = Vk->E(i); \
3220 } \
3221 *Vd = temp; \
3222 }
3223
3224 VILVL(vilvl_b, 16, B)
3225 VILVL(vilvl_h, 32, H)
3226 VILVL(vilvl_w, 64, W)
3227 VILVL(vilvl_d, 128, D)
3228
3229 #define VILVH(NAME, BIT, E) \
3230 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
3231 { \
3232 int i; \
3233 VReg temp; \
3234 VReg *Vd = (VReg *)vd; \
3235 VReg *Vj = (VReg *)vj; \
3236 VReg *Vk = (VReg *)vk; \
3237 \
3238 for (i = 0; i < LSX_LEN/BIT; i++) { \
3239 temp.E(2 * i + 1) = Vj->E(i + LSX_LEN/BIT); \
3240 temp.E(2 * i) = Vk->E(i + LSX_LEN/BIT); \
3241 } \
3242 *Vd = temp; \
3243 }
3244
3245 VILVH(vilvh_b, 16, B)
3246 VILVH(vilvh_h, 32, H)
3247 VILVH(vilvh_w, 64, W)
3248 VILVH(vilvh_d, 128, D)
3249
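/*
 * vshuf: each index selects from the double-width concatenation of
 * the sources, with Vk occupying indices 0..m-1 and Vj the rest.
 */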
3250 void HELPER(vshuf_b)(void *vd, void *vj, void *vk, void *va, uint32_t desc)
3251 {
3252 int i, m;
3253 VReg temp;
3254 VReg *Vd = (VReg *)vd;
3255 VReg *Vj = (VReg *)vj;
3256 VReg *Vk = (VReg *)vk;
3257 VReg *Va = (VReg *)va;
3258
3259 m = LSX_LEN/8;
3260 for (i = 0; i < m; i++) {
3261 uint64_t k = (uint8_t)Va->B(i) % (2 * m);
3262 temp.B(i) = k < m ? Vk->B(k) : Vj->B(k - m);
3263 }
3264 *Vd = temp;
3265 }
3266
3267 #define VSHUF(NAME, BIT, E) \
3268 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
3269 { \
3270 int i, m; \
3271 VReg temp; \
3272 VReg *Vd = (VReg *)vd; \
3273 VReg *Vj = (VReg *)vj; \
3274 VReg *Vk = (VReg *)vk; \
3275 \
3276 m = LSX_LEN/BIT; \
3277 for (i = 0; i < m; i++) { \
3278 uint64_t k = ((uint8_t) Vd->E(i)) % (2 * m); \
3279 temp.E(i) = k < m ? Vk->E(k) : Vj->E(k - m); \
3280 } \
3281 *Vd = temp; \
3282 }
3283
3284 VSHUF(vshuf_h, 16, H)
3285 VSHUF(vshuf_w, 32, W)
3286 VSHUF(vshuf_d, 64, D)
3287
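/*
 * vshuf4i: within each group of four elements, element i is replaced
 * by the group element selected by the 2-bit immediate field
 * (imm >> (2 * (i & 3))) & 3.
 */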
3288 #define VSHUF4I(NAME, BIT, E) \
3289 void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
3290 { \
3291 int i; \
3292 VReg temp; \
3293 VReg *Vd = (VReg *)vd; \
3294 VReg *Vj = (VReg *)vj; \
3295 \
3296 for (i = 0; i < LSX_LEN/BIT; i++) { \
3297 temp.E(i) = Vj->E(((i) & 0xfc) + (((imm) >> \
3298 (2 * ((i) & 0x03))) & 0x03)); \
3299 } \
3300 *Vd = temp; \
3301 }
3302
3303 VSHUF4I(vshuf4i_b, 8, B)
3304 VSHUF4I(vshuf4i_h, 16, H)
3305 VSHUF4I(vshuf4i_w, 32, W)
3306
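/*
 * The doubleword form instead picks each destination double from
 * {Vd, Vj}: imm bits 1 and 3 choose the register, bits 0 and 2 the
 * element within it.
 */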
3307 void HELPER(vshuf4i_d)(void *vd, void *vj, uint64_t imm, uint32_t desc)
3308 {
3309 VReg *Vd = (VReg *)vd;
3310 VReg *Vj = (VReg *)vj;
3311
3312 VReg temp;
3313 temp.D(0) = (imm & 2 ? Vj : Vd)->D(imm & 1);
3314 temp.D(1) = (imm & 8 ? Vj : Vd)->D((imm >> 2) & 1);
3315 *Vd = temp;
3316 }
3317
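/* vpermi.w: low two words selected from Vj, high two from the old Vd. */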
3318 void HELPER(vpermi_w)(void *vd, void *vj, uint64_t imm, uint32_t desc)
3319 {
3320 VReg temp;
3321 VReg *Vd = (VReg *)vd;
3322 VReg *Vj = (VReg *)vj;
3323
3324 temp.W(0) = Vj->W(imm & 0x3);
3325 temp.W(1) = Vj->W((imm >> 2) & 0x3);
3326 temp.W(2) = Vd->W((imm >> 4) & 0x3);
3327 temp.W(3) = Vd->W((imm >> 6) & 0x3);
3328 *Vd = temp;
3329 }
3330
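/*
 * vextrins: extract element 'extr' of Vj and insert it at position
 * 'ins' of Vd, both indices packed into the immediate.
 */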
3331 #define VEXTRINS(NAME, BIT, E, MASK) \
3332 void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
3333 { \
3334 int ins, extr; \
3335 VReg *Vd = (VReg *)vd; \
3336 VReg *Vj = (VReg *)vj; \
3337 \
3338 ins = (imm >> 4) & MASK; \
3339 extr = imm & MASK; \
3340 Vd->E(ins) = Vj->E(extr); \
3341 }
3342
3343 VEXTRINS(vextrins_b, 8, B, 0xf)
3344 VEXTRINS(vextrins_h, 16, H, 0x7)
3345 VEXTRINS(vextrins_w, 32, W, 0x3)
3346 VEXTRINS(vextrins_d, 64, D, 0x1)