/*
 * M-profile MVE Operations
 *
 * Copyright (c) 2021 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "vec_internal.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "fpu/softfloat.h"
#include "crypto/clmul.h"
static uint16_t mve_eci_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector correspond
     * to beats being executed. The mask has 1 bits for executed lanes
     * and 0 bits where ECI says this beat was already executed.
     */
    int eci;

    if ((env->condexec_bits & 0xf) != 0) {
        return 0xffff;
    }

    eci = env->condexec_bits >> 4;
    switch (eci) {
    case ECI_NONE:
        return 0xffff;
    case ECI_A0:
        return 0xfff0;
    case ECI_A0A1:
        return 0xff00;
    case ECI_A0A1A2:
    case ECI_A0A1A2B0:
        return 0xf000;
    default:
        g_assert_not_reached();
    }
}
static uint16_t mve_element_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector should be
     * updated. This is a combination of multiple things:
     *  (1) by default, we update every lane in the vector
     *  (2) VPT predication stores its state in the VPR register;
     *  (3) low-overhead-branch tail predication will mask out part
     *      of the vector on the final iteration of the loop
     *  (4) if EPSR.ECI is set then we must execute only some beats
     *      of the insn
     * We combine all these into a 16-bit result with the same semantics
     * as VPR.P0: 0 to mask the lane, 1 if it is active.
     * 8-bit vector ops will look at all bits of the result;
     * 16-bit ops will look at bits 0, 2, 4, ...;
     * 32-bit ops will look at bits 0, 4, 8 and 12.
     * Compare pseudocode GetCurInstrBeat(), though that only returns
     * the 4-bit slice of the mask corresponding to a single beat.
     */
    uint16_t mask = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);

    if (!(env->v7m.vpr & R_V7M_VPR_MASK01_MASK)) {
        mask |= 0xff;
    }
    if (!(env->v7m.vpr & R_V7M_VPR_MASK23_MASK)) {
        mask |= 0xff00;
    }

    if (env->v7m.ltpsize < 4 &&
        env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) {
        /*
         * Tail predication active, and this is the last loop iteration.
         * The element size is (1 << ltpsize), and we only want to process
         * loopcount elements, so we want to retain the least significant
         * (loopcount * esize) predicate bits and zero out bits above that.
         */
        int masklen = env->regs[14] << env->v7m.ltpsize;
        assert(masklen <= 16);
        uint16_t ltpmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0;
        mask &= ltpmask;
    }

    /*
     * ECI bits indicate which beats are already executed;
     * we handle this by effectively predicating them out.
     */
    mask &= mve_eci_mask(env);
    return mask;
}
static void mve_advance_vpt(CPUARMState *env)
{
    /* Advance the VPT and ECI state if necessary */
    uint32_t vpr = env->v7m.vpr;
    unsigned mask01, mask23;
    uint16_t inv_mask;
    uint16_t eci_mask = mve_eci_mask(env);

    if ((env->condexec_bits & 0xf) == 0) {
        env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ?
            (ECI_A0 << 4) : (ECI_NONE << 4);
    }

    if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) {
        /* VPT not enabled, nothing to do */
        return;
    }

    /* Invert P0 bits if needed, but only for beats we actually executed */
    mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01);
    mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23);
    /* Start by assuming we invert all bits corresponding to executed beats */
    inv_mask = eci_mask;
    if (mask01 <= 8) {
        /* MASK01 says don't invert low half of P0 */
        inv_mask &= ~0xff;
    }
    if (mask23 <= 8) {
        /* MASK23 says don't invert high half of P0 */
        inv_mask &= ~0xff00;
    }
    vpr ^= inv_mask;
    /* Only update MASK01 if beat 1 executed */
    if (eci_mask & 0xf0) {
        vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
    }
    /* Beat 3 always executes, so update MASK23 */
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1);
    env->v7m.vpr = vpr;
}
/* For loads, predicated lanes are zeroed instead of keeping their old values */
#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
    { \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        uint16_t eci_mask = mve_eci_mask(env); \
        unsigned b, e; \
        /* \
         * R_SXTM allows the dest reg to become UNKNOWN for abandoned \
         * beats so we don't care if we update part of the dest and \
         * then take an exception. \
         */ \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
            if (eci_mask & (1 << b)) { \
                d[H##ESIZE(e)] = (mask & (1 << b)) ? \
                    cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0; \
            } \
            addr += MSIZE; \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
    { \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned b, e; \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
            if (mask & (1 << b)) { \
                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
            } \
            addr += MSIZE; \
        } \
        mve_advance_vpt(env); \
    }

DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
DO_VLDR(vldrh, 2, lduw, 2, uint16_t)
DO_VLDR(vldrw, 4, ldl, 4, uint32_t)

DO_VSTR(vstrb, 1, stb, 1, uint8_t)
DO_VSTR(vstrh, 2, stw, 2, uint16_t)
DO_VSTR(vstrw, 4, stl, 4, uint32_t)

DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t)
DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t)
DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t)
DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t)
DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t)

DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
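/*
 * For example, vldrb_sh above loads one byte per element (MSIZE 1) and
 * sign-extends it into a 16-bit vector element (ESIZE 2): a widening
 * load whose memory footprint is only 8 bytes.
 */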
212 * Gather loads/scatter stores. Here each element of Qm specifies
213 * an offset to use from the base register Rm. In the _os_ versions
214 * that offset is scaled by the element size.
215 * For loads, predicated lanes are zeroed instead of retaining
216 * their previous values.
218 #define DO_VLDR_SG(OP, LDTYPE, ESIZE, TYPE, OFFTYPE, ADDRFN, WB) \
219 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
224 uint16_t mask = mve_element_mask(env); \
225 uint16_t eci_mask = mve_eci_mask(env); \
228 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \
229 if (!(eci_mask & 1)) { \
232 addr = ADDRFN(base, m[H##ESIZE(e)]); \
233 d[H##ESIZE(e)] = (mask & 1) ? \
234 cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0; \
236 m[H##ESIZE(e)] = addr; \
239 mve_advance_vpt(env); \
242 /* We know here TYPE is unsigned so always the same as the offset type */
243 #define DO_VSTR_SG(OP, STTYPE, ESIZE, TYPE, ADDRFN, WB) \
244 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
249 uint16_t mask = mve_element_mask(env); \
250 uint16_t eci_mask = mve_eci_mask(env); \
253 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \
254 if (!(eci_mask & 1)) { \
257 addr = ADDRFN(base, m[H##ESIZE(e)]); \
259 cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
262 m[H##ESIZE(e)] = addr; \
265 mve_advance_vpt(env); \
269 * 64-bit accesses are slightly different: they are done as two 32-bit
270 * accesses, controlled by the predicate mask for the relevant beat,
271 * and with a single 32-bit offset in the first of the two Qm elements.
272 * Note that for QEMU our IMPDEF AIRCR.ENDIANNESS is always 0 (little).
273 * Address writeback happens on the odd beats and updates the address
274 * stored in the even-beat element.
276 #define DO_VLDR64_SG(OP, ADDRFN, WB) \
277 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
282 uint16_t mask = mve_element_mask(env); \
283 uint16_t eci_mask = mve_eci_mask(env); \
286 for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \
287 if (!(eci_mask & 1)) { \
290 addr = ADDRFN(base, m[H4(e & ~1)]); \
291 addr += 4 * (e & 1); \
292 d[H4(e)] = (mask & 1) ? cpu_ldl_data_ra(env, addr, GETPC()) : 0; \
293 if (WB && (e & 1)) { \
294 m[H4(e & ~1)] = addr - 4; \
297 mve_advance_vpt(env); \
300 #define DO_VSTR64_SG(OP, ADDRFN, WB) \
301 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
306 uint16_t mask = mve_element_mask(env); \
307 uint16_t eci_mask = mve_eci_mask(env); \
310 for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \
311 if (!(eci_mask & 1)) { \
314 addr = ADDRFN(base, m[H4(e & ~1)]); \
315 addr += 4 * (e & 1); \
317 cpu_stl_data_ra(env, addr, d[H4(e)], GETPC()); \
319 if (WB && (e & 1)) { \
320 m[H4(e & ~1)] = addr - 4; \
323 mve_advance_vpt(env); \
#define ADDR_ADD(BASE, OFFSET) ((BASE) + (OFFSET))
#define ADDR_ADD_OSH(BASE, OFFSET) ((BASE) + ((OFFSET) << 1))
#define ADDR_ADD_OSW(BASE, OFFSET) ((BASE) + ((OFFSET) << 2))
#define ADDR_ADD_OSD(BASE, OFFSET) ((BASE) + ((OFFSET) << 3))
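/*
 * For example, with the scaled _os_ forms an offset element of 3 and a
 * base of 0x1000 gives 0x1006 for halfwords (ADDR_ADD_OSH), 0x100c for
 * words (ADDR_ADD_OSW) and 0x1018 for doublewords (ADDR_ADD_OSD).
 */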
DO_VLDR_SG(vldrb_sg_sh, ldsb, 2, int16_t, uint16_t, ADDR_ADD, false)
DO_VLDR_SG(vldrb_sg_sw, ldsb, 4, int32_t, uint32_t, ADDR_ADD, false)
DO_VLDR_SG(vldrh_sg_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD, false)

DO_VLDR_SG(vldrb_sg_ub, ldub, 1, uint8_t, uint8_t, ADDR_ADD, false)
DO_VLDR_SG(vldrb_sg_uh, ldub, 2, uint16_t, uint16_t, ADDR_ADD, false)
DO_VLDR_SG(vldrb_sg_uw, ldub, 4, uint32_t, uint32_t, ADDR_ADD, false)
DO_VLDR_SG(vldrh_sg_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD, false)
DO_VLDR_SG(vldrh_sg_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD, false)
DO_VLDR_SG(vldrw_sg_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, false)
DO_VLDR64_SG(vldrd_sg_ud, ADDR_ADD, false)

DO_VLDR_SG(vldrh_sg_os_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD_OSH, false)
DO_VLDR_SG(vldrh_sg_os_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD_OSH, false)
DO_VLDR_SG(vldrh_sg_os_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD_OSH, false)
DO_VLDR_SG(vldrw_sg_os_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD_OSW, false)
DO_VLDR64_SG(vldrd_sg_os_ud, ADDR_ADD_OSD, false)

DO_VSTR_SG(vstrb_sg_ub, stb, 1, uint8_t, ADDR_ADD, false)
DO_VSTR_SG(vstrb_sg_uh, stb, 2, uint16_t, ADDR_ADD, false)
DO_VSTR_SG(vstrb_sg_uw, stb, 4, uint32_t, ADDR_ADD, false)
DO_VSTR_SG(vstrh_sg_uh, stw, 2, uint16_t, ADDR_ADD, false)
DO_VSTR_SG(vstrh_sg_uw, stw, 4, uint32_t, ADDR_ADD, false)
DO_VSTR_SG(vstrw_sg_uw, stl, 4, uint32_t, ADDR_ADD, false)
DO_VSTR64_SG(vstrd_sg_ud, ADDR_ADD, false)

DO_VSTR_SG(vstrh_sg_os_uh, stw, 2, uint16_t, ADDR_ADD_OSH, false)
DO_VSTR_SG(vstrh_sg_os_uw, stw, 4, uint32_t, ADDR_ADD_OSH, false)
DO_VSTR_SG(vstrw_sg_os_uw, stl, 4, uint32_t, ADDR_ADD_OSW, false)
DO_VSTR64_SG(vstrd_sg_os_ud, ADDR_ADD_OSD, false)

DO_VLDR_SG(vldrw_sg_wb_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, true)
DO_VLDR64_SG(vldrd_sg_wb_ud, ADDR_ADD, true)
DO_VSTR_SG(vstrw_sg_wb_uw, stl, 4, uint32_t, ADDR_ADD, true)
DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true)
/*
 * Deinterleaving loads/interleaving stores.
 *
 * For these helpers we are passed the index of the first Qreg
 * (VLD2/VST2 will also access Qn+1, VLD4/VST4 access Qn .. Qn+3)
 * and the value of the base address register Rn.
 * The helpers are specialized for pattern and element size, so
 * for instance vld42h is VLD4 with pattern 2, element size MO_16.
 *
 * These insns are beatwise but not predicated, so we must honour ECI,
 * but need not look at mve_element_mask().
 *
 * The pseudocode implements these insns with multiple memory accesses
 * of the element size, but rules R_VVVG and R_FXDM permit us to make
 * one 32-bit memory access per beat.
 */
383 #define DO_VLD4B(OP, O1, O2, O3, O4) \
384 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
388 uint16_t mask = mve_eci_mask(env); \
389 static const uint8_t off[4] = { O1, O2, O3, O4 }; \
390 uint32_t addr, data; \
391 for (beat = 0; beat < 4; beat++, mask >>= 4) { \
392 if ((mask & 1) == 0) { \
393 /* ECI says skip this beat */ \
396 addr = base + off[beat] * 4; \
397 data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
398 for (e = 0; e < 4; e++, data >>= 8) { \
399 uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \
400 qd[H1(off[beat])] = data; \
405 #define DO_VLD4H(OP, O1, O2) \
406 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
410 uint16_t mask = mve_eci_mask(env); \
411 static const uint8_t off[4] = { O1, O1, O2, O2 }; \
412 uint32_t addr, data; \
413 int y; /* y counts 0 2 0 2 */ \
415 for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) { \
416 if ((mask & 1) == 0) { \
417 /* ECI says skip this beat */ \
420 addr = base + off[beat] * 8 + (beat & 1) * 4; \
421 data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
422 qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y); \
423 qd[H2(off[beat])] = data; \
425 qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1); \
426 qd[H2(off[beat])] = data; \
430 #define DO_VLD4W(OP, O1, O2, O3, O4) \
431 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
435 uint16_t mask = mve_eci_mask(env); \
436 static const uint8_t off[4] = { O1, O2, O3, O4 }; \
437 uint32_t addr, data; \
440 for (beat = 0; beat < 4; beat++, mask >>= 4) { \
441 if ((mask & 1) == 0) { \
442 /* ECI says skip this beat */ \
445 addr = base + off[beat] * 4; \
446 data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
447 y = (beat + (O1 & 2)) & 3; \
448 qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y); \
449 qd[H4(off[beat] >> 2)] = data; \
DO_VLD4B(vld40b, 0, 1, 10, 11)
DO_VLD4B(vld41b, 2, 3, 12, 13)
DO_VLD4B(vld42b, 4, 5, 14, 15)
DO_VLD4B(vld43b, 6, 7, 8, 9)

DO_VLD4H(vld40h, 0, 5)
DO_VLD4H(vld41h, 1, 6)
DO_VLD4H(vld42h, 2, 7)
DO_VLD4H(vld43h, 3, 4)

DO_VLD4W(vld40w, 0, 1, 10, 11)
DO_VLD4W(vld41w, 2, 3, 12, 13)
DO_VLD4W(vld42w, 4, 5, 14, 15)
DO_VLD4W(vld43w, 6, 7, 8, 9)
468 #define DO_VLD2B(OP, O1, O2, O3, O4) \
469 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
473 uint16_t mask = mve_eci_mask(env); \
474 static const uint8_t off[4] = { O1, O2, O3, O4 }; \
475 uint32_t addr, data; \
477 for (beat = 0; beat < 4; beat++, mask >>= 4) { \
478 if ((mask & 1) == 0) { \
479 /* ECI says skip this beat */ \
482 addr = base + off[beat] * 2; \
483 data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
484 for (e = 0; e < 4; e++, data >>= 8) { \
485 qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1)); \
486 qd[H1(off[beat] + (e >> 1))] = data; \
491 #define DO_VLD2H(OP, O1, O2, O3, O4) \
492 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
496 uint16_t mask = mve_eci_mask(env); \
497 static const uint8_t off[4] = { O1, O2, O3, O4 }; \
498 uint32_t addr, data; \
501 for (beat = 0; beat < 4; beat++, mask >>= 4) { \
502 if ((mask & 1) == 0) { \
503 /* ECI says skip this beat */ \
506 addr = base + off[beat] * 4; \
507 data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
508 for (e = 0; e < 2; e++, data >>= 16) { \
509 qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e); \
510 qd[H2(off[beat])] = data; \
515 #define DO_VLD2W(OP, O1, O2, O3, O4) \
516 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
520 uint16_t mask = mve_eci_mask(env); \
521 static const uint8_t off[4] = { O1, O2, O3, O4 }; \
522 uint32_t addr, data; \
524 for (beat = 0; beat < 4; beat++, mask >>= 4) { \
525 if ((mask & 1) == 0) { \
526 /* ECI says skip this beat */ \
529 addr = base + off[beat]; \
530 data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
531 qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1)); \
532 qd[H4(off[beat] >> 3)] = data; \
DO_VLD2B(vld20b, 0, 2, 12, 14)
DO_VLD2B(vld21b, 4, 6, 8, 10)

DO_VLD2H(vld20h, 0, 1, 6, 7)
DO_VLD2H(vld21h, 2, 3, 4, 5)

DO_VLD2W(vld20w, 0, 4, 24, 28)
DO_VLD2W(vld21w, 8, 12, 16, 20)
545 #define DO_VST4B(OP, O1, O2, O3, O4) \
546 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
550 uint16_t mask = mve_eci_mask(env); \
551 static const uint8_t off[4] = { O1, O2, O3, O4 }; \
552 uint32_t addr, data; \
553 for (beat = 0; beat < 4; beat++, mask >>= 4) { \
554 if ((mask & 1) == 0) { \
555 /* ECI says skip this beat */ \
558 addr = base + off[beat] * 4; \
560 for (e = 3; e >= 0; e--) { \
561 uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \
562 data = (data << 8) | qd[H1(off[beat])]; \
564 cpu_stl_le_data_ra(env, addr, data, GETPC()); \
568 #define DO_VST4H(OP, O1, O2) \
569 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
573 uint16_t mask = mve_eci_mask(env); \
574 static const uint8_t off[4] = { O1, O1, O2, O2 }; \
575 uint32_t addr, data; \
576 int y; /* y counts 0 2 0 2 */ \
578 for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) { \
579 if ((mask & 1) == 0) { \
580 /* ECI says skip this beat */ \
583 addr = base + off[beat] * 8 + (beat & 1) * 4; \
584 qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y); \
585 data = qd[H2(off[beat])]; \
586 qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1); \
587 data |= qd[H2(off[beat])] << 16; \
588 cpu_stl_le_data_ra(env, addr, data, GETPC()); \
592 #define DO_VST4W(OP, O1, O2, O3, O4) \
593 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
597 uint16_t mask = mve_eci_mask(env); \
598 static const uint8_t off[4] = { O1, O2, O3, O4 }; \
599 uint32_t addr, data; \
602 for (beat = 0; beat < 4; beat++, mask >>= 4) { \
603 if ((mask & 1) == 0) { \
604 /* ECI says skip this beat */ \
607 addr = base + off[beat] * 4; \
608 y = (beat + (O1 & 2)) & 3; \
609 qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y); \
610 data = qd[H4(off[beat] >> 2)]; \
611 cpu_stl_le_data_ra(env, addr, data, GETPC()); \
DO_VST4B(vst40b, 0, 1, 10, 11)
DO_VST4B(vst41b, 2, 3, 12, 13)
DO_VST4B(vst42b, 4, 5, 14, 15)
DO_VST4B(vst43b, 6, 7, 8, 9)

DO_VST4H(vst40h, 0, 5)
DO_VST4H(vst41h, 1, 6)
DO_VST4H(vst42h, 2, 7)
DO_VST4H(vst43h, 3, 4)

DO_VST4W(vst40w, 0, 1, 10, 11)
DO_VST4W(vst41w, 2, 3, 12, 13)
DO_VST4W(vst42w, 4, 5, 14, 15)
DO_VST4W(vst43w, 6, 7, 8, 9)
630 #define DO_VST2B(OP, O1, O2, O3, O4) \
631 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
635 uint16_t mask = mve_eci_mask(env); \
636 static const uint8_t off[4] = { O1, O2, O3, O4 }; \
637 uint32_t addr, data; \
639 for (beat = 0; beat < 4; beat++, mask >>= 4) { \
640 if ((mask & 1) == 0) { \
641 /* ECI says skip this beat */ \
644 addr = base + off[beat] * 2; \
646 for (e = 3; e >= 0; e--) { \
647 qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1)); \
648 data = (data << 8) | qd[H1(off[beat] + (e >> 1))]; \
650 cpu_stl_le_data_ra(env, addr, data, GETPC()); \
654 #define DO_VST2H(OP, O1, O2, O3, O4) \
655 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
659 uint16_t mask = mve_eci_mask(env); \
660 static const uint8_t off[4] = { O1, O2, O3, O4 }; \
661 uint32_t addr, data; \
664 for (beat = 0; beat < 4; beat++, mask >>= 4) { \
665 if ((mask & 1) == 0) { \
666 /* ECI says skip this beat */ \
669 addr = base + off[beat] * 4; \
671 for (e = 1; e >= 0; e--) { \
672 qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e); \
673 data = (data << 16) | qd[H2(off[beat])]; \
675 cpu_stl_le_data_ra(env, addr, data, GETPC()); \
679 #define DO_VST2W(OP, O1, O2, O3, O4) \
680 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
684 uint16_t mask = mve_eci_mask(env); \
685 static const uint8_t off[4] = { O1, O2, O3, O4 }; \
686 uint32_t addr, data; \
688 for (beat = 0; beat < 4; beat++, mask >>= 4) { \
689 if ((mask & 1) == 0) { \
690 /* ECI says skip this beat */ \
693 addr = base + off[beat]; \
694 qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1)); \
695 data = qd[H4(off[beat] >> 3)]; \
696 cpu_stl_le_data_ra(env, addr, data, GETPC()); \
DO_VST2B(vst20b, 0, 2, 12, 14)
DO_VST2B(vst21b, 4, 6, 8, 10)

DO_VST2H(vst20h, 0, 1, 6, 7)
DO_VST2H(vst21h, 2, 3, 4, 5)

DO_VST2W(vst20w, 0, 4, 24, 28)
DO_VST2W(vst21w, 8, 12, 16, 20)
/*
 * The mergemask(D, R, M) macro performs the operation "*D = R" but
 * storing only the bytes which correspond to 1 bits in M,
 * leaving other bytes in *D unchanged. We use _Generic
 * to select the correct implementation based on the type of D.
 */

static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask)
{
    if (mask & 1) {
        *d = r;
    }
}

static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask)
{
    mergemask_ub((uint8_t *)d, r, mask);
}

static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask)
{
    uint16_t bmask = expand_pred_b(mask);
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask)
{
    mergemask_uh((uint16_t *)d, r, mask);
}

static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask)
{
    uint32_t bmask = expand_pred_b(mask);
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask)
{
    mergemask_uw((uint32_t *)d, r, mask);
}

static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask)
{
    uint64_t bmask = expand_pred_b(mask);
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask)
{
    mergemask_uq((uint64_t *)d, r, mask);
}

#define mergemask(D, R, M) \
    _Generic(D, \
             uint8_t *:  mergemask_ub, \
             int8_t *:   mergemask_sb, \
             uint16_t *: mergemask_uh, \
             int16_t *:  mergemask_sh, \
             uint32_t *: mergemask_uw, \
             int32_t *:  mergemask_sw, \
             uint64_t *: mergemask_uq, \
             int64_t *:  mergemask_sq)(D, R, M)
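/*
 * For example, mergemask(&d[H4(e)], r, mask) with d pointing at
 * uint32_t elements selects mergemask_uw() via _Generic, which uses
 * expand_pred_b() to turn the low predicate bits into a byte mask and
 * so writes only the predicated bytes of that element.
 */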
void HELPER(mve_vdup)(CPUARMState *env, void *vd, uint32_t val)
{
    /*
     * The generated code already replicated an 8 or 16 bit constant
     * into the 32-bit value, so we only need to write the 32-bit
     * value to all elements of the Qreg, allowing for predication.
     */
    uint32_t *d = vd;
    uint16_t mask = mve_element_mask(env);
    unsigned e;
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        mergemask(&d[H4(e)], val, mask);
    }
    mve_advance_vpt(env);
}
#define DO_1OP(OP, ESIZE, TYPE, FN) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    { \
        TYPE *d = vd, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask); \
        } \
        mve_advance_vpt(env); \
    }
#define DO_CLS_B(N) (clrsb32(N) - 24)
#define DO_CLS_H(N) (clrsb32(N) - 16)

DO_1OP(vclsb, 1, int8_t, DO_CLS_B)
DO_1OP(vclsh, 2, int16_t, DO_CLS_H)
DO_1OP(vclsw, 4, int32_t, clrsb32)

#define DO_CLZ_B(N) (clz32(N) - 24)
#define DO_CLZ_H(N) (clz32(N) - 16)

DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B)
DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H)
DO_1OP(vclzw, 4, uint32_t, clz32)

DO_1OP(vrev16b, 2, uint16_t, bswap16)
DO_1OP(vrev32b, 4, uint32_t, bswap32)
DO_1OP(vrev32h, 4, uint32_t, hswap32)
DO_1OP(vrev64b, 8, uint64_t, bswap64)
DO_1OP(vrev64h, 8, uint64_t, hswap64)
DO_1OP(vrev64w, 8, uint64_t, wswap64)

#define DO_NOT(N) (~(N))

DO_1OP(vmvn, 8, uint64_t, DO_NOT)

#define DO_ABS(N) ((N) < 0 ? -(N) : (N))
#define DO_FABSH(N) ((N) & dup_const(MO_16, 0x7fff))
#define DO_FABSS(N) ((N) & dup_const(MO_32, 0x7fffffff))

DO_1OP(vabsb, 1, int8_t, DO_ABS)
DO_1OP(vabsh, 2, int16_t, DO_ABS)
DO_1OP(vabsw, 4, int32_t, DO_ABS)

/* We can do these 64 bits at a time */
DO_1OP(vfabsh, 8, uint64_t, DO_FABSH)
DO_1OP(vfabss, 8, uint64_t, DO_FABSS)

#define DO_NEG(N) (-(N))
#define DO_FNEGH(N) ((N) ^ dup_const(MO_16, 0x8000))
#define DO_FNEGS(N) ((N) ^ dup_const(MO_32, 0x80000000))

DO_1OP(vnegb, 1, int8_t, DO_NEG)
DO_1OP(vnegh, 2, int16_t, DO_NEG)
DO_1OP(vnegw, 4, int32_t, DO_NEG)

/* We can do these 64 bits at a time */
DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
/*
 * 1 operand immediates: Vda is destination and possibly also one source.
 * All these insns work at 64-bit widths.
 */
#define DO_1OP_IMM(OP, FN) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm) \
    { \
        uint64_t *da = vda; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
            mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_MOVI(N, I) (I)
#define DO_ANDI(N, I) ((N) & (I))
#define DO_ORRI(N, I) ((N) | (I))

DO_1OP_IMM(vmovi, DO_MOVI)
DO_1OP_IMM(vandi, DO_ANDI)
DO_1OP_IMM(vorri, DO_ORRI)
873 #define DO_2OP(OP, ESIZE, TYPE, FN) \
874 void HELPER(glue(mve_, OP))(CPUARMState *env, \
875 void *vd, void *vn, void *vm) \
877 TYPE *d = vd, *n = vn, *m = vm; \
878 uint16_t mask = mve_element_mask(env); \
880 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
881 mergemask(&d[H##ESIZE(e)], \
882 FN(n[H##ESIZE(e)], m[H##ESIZE(e)]), mask); \
884 mve_advance_vpt(env); \
887 /* provide unsigned 2-op helpers for all sizes */
888 #define DO_2OP_U(OP, FN) \
889 DO_2OP(OP##b, 1, uint8_t, FN) \
890 DO_2OP(OP##h, 2, uint16_t, FN) \
891 DO_2OP(OP##w, 4, uint32_t, FN)
893 /* provide signed 2-op helpers for all sizes */
894 #define DO_2OP_S(OP, FN) \
895 DO_2OP(OP##b, 1, int8_t, FN) \
896 DO_2OP(OP##h, 2, int16_t, FN) \
897 DO_2OP(OP##w, 4, int32_t, FN)
900 * "Long" operations where two half-sized inputs (taken from either the
901 * top or the bottom of the input vector) produce a double-width result.
902 * Here ESIZE, TYPE are for the input, and LESIZE, LTYPE for the output.
904 #define DO_2OP_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
905 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
908 TYPE *n = vn, *m = vm; \
909 uint16_t mask = mve_element_mask(env); \
911 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
912 LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], \
913 m[H##ESIZE(le * 2 + TOP)]); \
914 mergemask(&d[H##LESIZE(le)], r, mask); \
916 mve_advance_vpt(env); \
919 #define DO_2OP_SAT(OP, ESIZE, TYPE, FN) \
920 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
922 TYPE *d = vd, *n = vn, *m = vm; \
923 uint16_t mask = mve_element_mask(env); \
926 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
928 TYPE r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat); \
929 mergemask(&d[H##ESIZE(e)], r, mask); \
930 qc |= sat & mask & 1; \
933 env->vfp.qc[0] = qc; \
935 mve_advance_vpt(env); \
938 /* provide unsigned 2-op helpers for all sizes */
939 #define DO_2OP_SAT_U(OP, FN) \
940 DO_2OP_SAT(OP##b, 1, uint8_t, FN) \
941 DO_2OP_SAT(OP##h, 2, uint16_t, FN) \
942 DO_2OP_SAT(OP##w, 4, uint32_t, FN)
944 /* provide signed 2-op helpers for all sizes */
945 #define DO_2OP_SAT_S(OP, FN) \
946 DO_2OP_SAT(OP##b, 1, int8_t, FN) \
947 DO_2OP_SAT(OP##h, 2, int16_t, FN) \
948 DO_2OP_SAT(OP##w, 4, int32_t, FN)
#define DO_AND(N, M) ((N) & (M))
#define DO_BIC(N, M) ((N) & ~(M))
#define DO_ORR(N, M) ((N) | (M))
#define DO_ORN(N, M) ((N) | ~(M))
#define DO_EOR(N, M) ((N) ^ (M))

DO_2OP(vand, 8, uint64_t, DO_AND)
DO_2OP(vbic, 8, uint64_t, DO_BIC)
DO_2OP(vorr, 8, uint64_t, DO_ORR)
DO_2OP(vorn, 8, uint64_t, DO_ORN)
DO_2OP(veor, 8, uint64_t, DO_EOR)

#define DO_ADD(N, M) ((N) + (M))
#define DO_SUB(N, M) ((N) - (M))
#define DO_MUL(N, M) ((N) * (M))

DO_2OP_U(vadd, DO_ADD)
DO_2OP_U(vsub, DO_SUB)
DO_2OP_U(vmul, DO_MUL)

DO_2OP_L(vmullbsb, 0, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmullbsw, 0, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmullbub, 0, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmullbuh, 0, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmullbuw, 0, 4, uint32_t, 8, uint64_t, DO_MUL)

DO_2OP_L(vmulltsb, 1, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmulltsh, 1, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmulltsw, 1, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL)
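/*
 * For example, DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL)
 * above multiplies the even-numbered (bottom) int16_t elements of Qn
 * and Qm and writes the full 32-bit products into the int32_t elements
 * of Qd.
 */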
/*
 * Polynomial multiply. We can always do this generating 64 bits
 * of the result at a time, so we don't need to use DO_2OP_L.
 */
#define VMULLPW_MASK 0x0000ffff0000ffffULL
#define DO_VMULLPBW(N, M) pmull_w((N) & VMULLPW_MASK, (M) & VMULLPW_MASK)
#define DO_VMULLPTW(N, M) DO_VMULLPBW((N) >> 16, (M) >> 16)

DO_2OP(vmullpbh, 8, uint64_t, clmul_8x4_even)
DO_2OP(vmullpth, 8, uint64_t, clmul_8x4_odd)
DO_2OP(vmullpbw, 8, uint64_t, DO_VMULLPBW)
DO_2OP(vmullptw, 8, uint64_t, DO_VMULLPTW)
/*
 * Because the computation type is at least twice as large as required,
 * these work for both signed and unsigned source types.
 */
static inline uint8_t do_mulh_b(int32_t n, int32_t m)
{
    return (n * m) >> 8;
}

static inline uint16_t do_mulh_h(int32_t n, int32_t m)
{
    return (n * m) >> 16;
}

static inline uint32_t do_mulh_w(int64_t n, int64_t m)
{
    return (n * m) >> 32;
}

static inline uint8_t do_rmulh_b(int32_t n, int32_t m)
{
    return (n * m + (1U << 7)) >> 8;
}

static inline uint16_t do_rmulh_h(int32_t n, int32_t m)
{
    return (n * m + (1U << 15)) >> 16;
}

static inline uint32_t do_rmulh_w(int64_t n, int64_t m)
{
    return (n * m + (1U << 31)) >> 32;
}

DO_2OP(vmulhsb, 1, int8_t, do_mulh_b)
DO_2OP(vmulhsh, 2, int16_t, do_mulh_h)
DO_2OP(vmulhsw, 4, int32_t, do_mulh_w)
DO_2OP(vmulhub, 1, uint8_t, do_mulh_b)
DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h)
DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w)

DO_2OP(vrmulhsb, 1, int8_t, do_rmulh_b)
DO_2OP(vrmulhsh, 2, int16_t, do_rmulh_h)
DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w)
DO_2OP(vrmulhub, 1, uint8_t, do_rmulh_b)
DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h)
DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w)
#define DO_MAX(N, M) ((N) >= (M) ? (N) : (M))
#define DO_MIN(N, M) ((N) >= (M) ? (M) : (N))

DO_2OP_S(vmaxs, DO_MAX)
DO_2OP_U(vmaxu, DO_MAX)
DO_2OP_S(vmins, DO_MIN)
DO_2OP_U(vminu, DO_MIN)

#define DO_ABD(N, M) ((N) >= (M) ? (N) - (M) : (M) - (N))

DO_2OP_S(vabds, DO_ABD)
DO_2OP_U(vabdu, DO_ABD)

static inline uint32_t do_vhadd_u(uint32_t n, uint32_t m)
{
    return ((uint64_t)n + m) >> 1;
}

static inline int32_t do_vhadd_s(int32_t n, int32_t m)
{
    return ((int64_t)n + m) >> 1;
}

static inline uint32_t do_vhsub_u(uint32_t n, uint32_t m)
{
    return ((uint64_t)n - m) >> 1;
}

static inline int32_t do_vhsub_s(int32_t n, int32_t m)
{
    return ((int64_t)n - m) >> 1;
}

DO_2OP_S(vhadds, do_vhadd_s)
DO_2OP_U(vhaddu, do_vhadd_u)
DO_2OP_S(vhsubs, do_vhsub_s)
DO_2OP_U(vhsubu, do_vhsub_u)
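/*
 * Widening to 64 bits in the helpers above is what makes the halving
 * correct at the boundaries: e.g. do_vhadd_u(0xffffffff, 0xffffffff)
 * computes 0x1fffffffe >> 1 = 0xffffffff without overflowing.
 */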
#define DO_VSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
#define DO_VSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
#define DO_VRSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)
#define DO_VRSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)

DO_2OP_S(vshls, DO_VSHLS)
DO_2OP_U(vshlu, DO_VSHLU)
DO_2OP_S(vrshls, DO_VRSHLS)
DO_2OP_U(vrshlu, DO_VRSHLU)

#define DO_RHADD_S(N, M) (((int64_t)(N) + (M) + 1) >> 1)
#define DO_RHADD_U(N, M) (((uint64_t)(N) + (M) + 1) >> 1)

DO_2OP_S(vrhadds, DO_RHADD_S)
DO_2OP_U(vrhaddu, DO_RHADD_U)
static void do_vadc(CPUARMState *env, uint32_t *d, uint32_t *n, uint32_t *m,
                    uint32_t inv, uint32_t carry_in, bool update_flags)
{
    uint16_t mask = mve_element_mask(env);
    unsigned e;

    /* If any additions trigger, we will update flags. */
    if (mask & 0x1111) {
        update_flags = true;
    }

    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        uint64_t r = carry_in;
        r += n[H4(e)];
        r += m[H4(e)] ^ inv;
        if (mask & 1) {
            carry_in = r >> 32;
        }
        mergemask(&d[H4(e)], r, mask);
    }

    if (update_flags) {
        /* Store C, clear NZV. */
        env->vfp.xregs[ARM_VFP_FPSCR] &= ~FPCR_NZCV_MASK;
        env->vfp.xregs[ARM_VFP_FPSCR] |= carry_in * FPCR_C;
    }
    mve_advance_vpt(env);
}

void HELPER(mve_vadc)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
    do_vadc(env, vd, vn, vm, 0, carry_in, false);
}

void HELPER(mve_vsbc)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
    do_vadc(env, vd, vn, vm, -1, carry_in, false);
}

void HELPER(mve_vadci)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    do_vadc(env, vd, vn, vm, 0, 0, true);
}

void HELPER(mve_vsbci)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    do_vadc(env, vd, vn, vm, -1, 1, true);
}
1151 #define DO_VCADD(OP, ESIZE, TYPE, FN0, FN1) \
1152 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
1154 TYPE *d = vd, *n = vn, *m = vm; \
1155 uint16_t mask = mve_element_mask(env); \
1157 TYPE r[16 / ESIZE]; \
1158 /* Calculate all results first to avoid overwriting inputs */ \
1159 for (e = 0; e < 16 / ESIZE; e++) { \
1161 r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)]); \
1163 r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)]); \
1166 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
1167 mergemask(&d[H##ESIZE(e)], r[e], mask); \
1169 mve_advance_vpt(env); \
1172 #define DO_VCADD_ALL(OP, FN0, FN1) \
1173 DO_VCADD(OP##b, 1, int8_t, FN0, FN1) \
1174 DO_VCADD(OP##h, 2, int16_t, FN0, FN1) \
1175 DO_VCADD(OP##w, 4, int32_t, FN0, FN1)
DO_VCADD_ALL(vcadd90, DO_SUB, DO_ADD)
DO_VCADD_ALL(vcadd270, DO_ADD, DO_SUB)
DO_VCADD_ALL(vhcadd90, do_vhsub_s, do_vhadd_s)
DO_VCADD_ALL(vhcadd270, do_vhadd_s, do_vhsub_s)
static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
{
    if (val > max) {
        *s = true;
        return max;
    } else if (val < min) {
        *s = true;
        return min;
    }
    return val;
}
#define DO_SQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, INT8_MIN, INT8_MAX, s)
#define DO_SQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, INT16_MIN, INT16_MAX, s)
#define DO_SQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, INT32_MIN, INT32_MAX, s)

#define DO_UQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT8_MAX, s)
#define DO_UQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT16_MAX, s)
#define DO_UQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT32_MAX, s)

#define DO_SQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, INT8_MIN, INT8_MAX, s)
#define DO_SQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, INT16_MIN, INT16_MAX, s)
#define DO_SQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, INT32_MIN, INT32_MAX, s)

#define DO_UQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT8_MAX, s)
#define DO_UQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT16_MAX, s)
#define DO_UQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT32_MAX, s)

/*
 * For QDMULH and QRDMULH we simplify "double and shift by esize" into
 * "shift by esize-1", adjusting the QRDMULH rounding constant to match.
 */
#define DO_QDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m) >> 7, \
                                        INT8_MIN, INT8_MAX, s)
#define DO_QDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m) >> 15, \
                                        INT16_MIN, INT16_MAX, s)
#define DO_QDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m) >> 31, \
                                        INT32_MIN, INT32_MAX, s)

#define DO_QRDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 6)) >> 7, \
                                         INT8_MIN, INT8_MAX, s)
#define DO_QRDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 14)) >> 15, \
                                         INT16_MIN, INT16_MAX, s)
#define DO_QRDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 30)) >> 31, \
                                         INT32_MIN, INT32_MAX, s)
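/*
 * For example, DO_QDMULH_H(INT16_MIN, INT16_MIN, s) computes
 * ((int64_t)-32768 * -32768) >> 15 = 32768, which do_sat_bhw() then
 * clamps to INT16_MAX and reports in *s; this is the only 16-bit input
 * pair that can saturate for QDMULH.
 */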
DO_2OP_SAT(vqdmulhb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT(vqdmulhh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT(vqdmulhw, 4, int32_t, DO_QDMULH_W)

DO_2OP_SAT(vqrdmulhb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT(vqrdmulhh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT(vqrdmulhw, 4, int32_t, DO_QRDMULH_W)

DO_2OP_SAT(vqaddub, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT(vqadduh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT(vqadduw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT(vqaddsb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT(vqaddsh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT(vqaddsw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT(vqsubub, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT(vqsubuh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT(vqsubuw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT(vqsubsb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT(vqsubsh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)
/*
 * This wrapper fixes up the impedance mismatch between do_sqrshl_bhs()
 * and friends wanting a uint32_t* sat and our needing a bool*.
 */
#define WRAP_QRSHL_HELPER(FN, N, M, ROUND, satp) \
    ({ \
        uint32_t su32 = 0; \
        typeof(N) r = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32); \
        if (su32) { \
            *satp = true; \
        } \
        r; \
    })

#define DO_SQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, false, satp)
#define DO_UQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, false, satp)
#define DO_SQRSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp)
#define DO_UQRSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp)
#define DO_SUQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_suqrshl_bhs, N, M, false, satp)

DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
DO_2OP_SAT_S(vqrshls, DO_SQRSHL_OP)
DO_2OP_SAT_U(vqrshlu, DO_UQRSHL_OP)
1281 * Multiply add dual returning high half
1282 * The 'FN' here takes four inputs A, B, C, D, a 0/1 indicator of
1283 * whether to add the rounding constant, and the pointer to the
1284 * saturation flag, and should do "(A * B + C * D) * 2 + rounding constant",
1285 * saturate to twice the input size and return the high half; or
1286 * (A * B - C * D) etc for VQDMLSDH.
1288 #define DO_VQDMLADH_OP(OP, ESIZE, TYPE, XCHG, ROUND, FN) \
1289 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
1292 TYPE *d = vd, *n = vn, *m = vm; \
1293 uint16_t mask = mve_element_mask(env); \
1296 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
1298 if ((e & 1) == XCHG) { \
1299 TYPE r = FN(n[H##ESIZE(e)], \
1300 m[H##ESIZE(e - XCHG)], \
1301 n[H##ESIZE(e + (1 - 2 * XCHG))], \
1302 m[H##ESIZE(e + (1 - XCHG))], \
1304 mergemask(&d[H##ESIZE(e)], r, mask); \
1305 qc |= sat & mask & 1; \
1309 env->vfp.qc[0] = qc; \
1311 mve_advance_vpt(env); \
static int8_t do_vqdmladh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}
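/*
 * For example (illustrative), do_vqdmladh_b(100, 100, 100, 100, 0, &sat)
 * computes (10000 + 10000) * 2 = 40000, which do_sat_bhw() clamps to
 * INT16_MAX = 32767 and flags; the returned high byte is then 127.
 */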
static int16_t do_vqdmladh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmladh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;
    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * m1 + m2 is negative then it's possible that doing the doubling
     * would take the intermediate result below INT64_MIN and the
     * addition of the rounding constant then brings it back in range.
     * So we add half the rounding constant before doubling rather
     * than adding the rounding constant after the doubling.
     */
    if (sadd64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}
static int8_t do_vqdmlsdh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmlsdh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmlsdh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;
    /* The same ordering issue as in do_vqdmladh_w applies here too */
    if (ssub64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}
DO_VQDMLADH_OP(vqdmladhb, 1, int8_t, 0, 0, do_vqdmladh_b)
DO_VQDMLADH_OP(vqdmladhh, 2, int16_t, 0, 0, do_vqdmladh_h)
DO_VQDMLADH_OP(vqdmladhw, 4, int32_t, 0, 0, do_vqdmladh_w)
DO_VQDMLADH_OP(vqdmladhxb, 1, int8_t, 1, 0, do_vqdmladh_b)
DO_VQDMLADH_OP(vqdmladhxh, 2, int16_t, 1, 0, do_vqdmladh_h)
DO_VQDMLADH_OP(vqdmladhxw, 4, int32_t, 1, 0, do_vqdmladh_w)

DO_VQDMLADH_OP(vqrdmladhb, 1, int8_t, 0, 1, do_vqdmladh_b)
DO_VQDMLADH_OP(vqrdmladhh, 2, int16_t, 0, 1, do_vqdmladh_h)
DO_VQDMLADH_OP(vqrdmladhw, 4, int32_t, 0, 1, do_vqdmladh_w)
DO_VQDMLADH_OP(vqrdmladhxb, 1, int8_t, 1, 1, do_vqdmladh_b)
DO_VQDMLADH_OP(vqrdmladhxh, 2, int16_t, 1, 1, do_vqdmladh_h)
DO_VQDMLADH_OP(vqrdmladhxw, 4, int32_t, 1, 1, do_vqdmladh_w)

DO_VQDMLADH_OP(vqdmlsdhb, 1, int8_t, 0, 0, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqdmlsdhh, 2, int16_t, 0, 0, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqdmlsdhw, 4, int32_t, 0, 0, do_vqdmlsdh_w)
DO_VQDMLADH_OP(vqdmlsdhxb, 1, int8_t, 1, 0, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqdmlsdhxh, 2, int16_t, 1, 0, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqdmlsdhxw, 4, int32_t, 1, 0, do_vqdmlsdh_w)

DO_VQDMLADH_OP(vqrdmlsdhb, 1, int8_t, 0, 1, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqrdmlsdhh, 2, int16_t, 0, 1, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqrdmlsdhw, 4, int32_t, 0, 1, do_vqdmlsdh_w)
DO_VQDMLADH_OP(vqrdmlsdhxb, 1, int8_t, 1, 1, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqrdmlsdhxh, 2, int16_t, 1, 1, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w)
1413 #define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
1414 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
1417 TYPE *d = vd, *n = vn; \
1419 uint16_t mask = mve_element_mask(env); \
1421 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
1422 mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m), mask); \
1424 mve_advance_vpt(env); \
1427 #define DO_2OP_SAT_SCALAR(OP, ESIZE, TYPE, FN) \
1428 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
1431 TYPE *d = vd, *n = vn; \
1433 uint16_t mask = mve_element_mask(env); \
1436 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
1438 mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m, &sat), \
1440 qc |= sat & mask & 1; \
1443 env->vfp.qc[0] = qc; \
1445 mve_advance_vpt(env); \
1448 /* "accumulating" version where FN takes d as well as n and m */
1449 #define DO_2OP_ACC_SCALAR(OP, ESIZE, TYPE, FN) \
1450 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
1453 TYPE *d = vd, *n = vn; \
1455 uint16_t mask = mve_element_mask(env); \
1457 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
1458 mergemask(&d[H##ESIZE(e)], \
1459 FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m), mask); \
1461 mve_advance_vpt(env); \
1464 #define DO_2OP_SAT_ACC_SCALAR(OP, ESIZE, TYPE, FN) \
1465 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
1468 TYPE *d = vd, *n = vn; \
1470 uint16_t mask = mve_element_mask(env); \
1473 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
1475 mergemask(&d[H##ESIZE(e)], \
1476 FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m, &sat), \
1478 qc |= sat & mask & 1; \
1481 env->vfp.qc[0] = qc; \
1483 mve_advance_vpt(env); \
1486 /* provide unsigned 2-op scalar helpers for all sizes */
1487 #define DO_2OP_SCALAR_U(OP, FN) \
1488 DO_2OP_SCALAR(OP##b, 1, uint8_t, FN) \
1489 DO_2OP_SCALAR(OP##h, 2, uint16_t, FN) \
1490 DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)
1491 #define DO_2OP_SCALAR_S(OP, FN) \
1492 DO_2OP_SCALAR(OP##b, 1, int8_t, FN) \
1493 DO_2OP_SCALAR(OP##h, 2, int16_t, FN) \
1494 DO_2OP_SCALAR(OP##w, 4, int32_t, FN)
1496 #define DO_2OP_ACC_SCALAR_U(OP, FN) \
1497 DO_2OP_ACC_SCALAR(OP##b, 1, uint8_t, FN) \
1498 DO_2OP_ACC_SCALAR(OP##h, 2, uint16_t, FN) \
1499 DO_2OP_ACC_SCALAR(OP##w, 4, uint32_t, FN)
DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)
DO_2OP_SCALAR_S(vhadds_scalar, do_vhadd_s)
DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u)
DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s)
DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u)

DO_2OP_SAT_SCALAR(vqaddu_scalarb, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT_SCALAR(vqaddu_scalarh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT_SCALAR(vqaddu_scalarw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT_SCALAR(vqadds_scalarb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT_SCALAR(vqadds_scalarh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT_SCALAR(vqadds_scalarw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT_SCALAR(vqsubu_scalarb, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT_SCALAR(vqsubu_scalarh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT_SCALAR(vqsubu_scalarw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT_SCALAR(vqsubs_scalarb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT_SCALAR(vqsubs_scalarh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT_SCALAR(vqsubs_scalarw, 4, int32_t, DO_SQSUB_W)

DO_2OP_SAT_SCALAR(vqdmulh_scalarb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT_SCALAR(vqdmulh_scalarh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT_SCALAR(vqdmulh_scalarw, 4, int32_t, DO_QDMULH_W)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)
static int8_t do_vqdmlah_b(int8_t a, int8_t b, int8_t c, int round, bool *sat)
{
    int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 8) + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmlah_h(int16_t a, int16_t b, int16_t c,
                            int round, bool *sat)
{
    int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 16) + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmlah_w(int32_t a, int32_t b, int32_t c,
                            int round, bool *sat)
{
    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * m1 + m2 is negative then it's possible that doing the doubling
     * would take the intermediate result below INT64_MIN and the
     * addition of the rounding constant then brings it back in range.
     * So we add half the rounding constant and half the "c << esize"
     * before doubling rather than adding the rounding constant after
     * the doubling.
     */
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c << 31;
    int64_t r;
    if (sadd64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}
/*
 * The *MLAH insns are vector * scalar + vector;
 * the *MLASH insns are vector * vector + scalar
 */
#define DO_VQDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 0, S)
#define DO_VQDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 0, S)
#define DO_VQDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 0, S)
#define DO_VQRDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 1, S)
#define DO_VQRDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 1, S)
#define DO_VQRDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 1, S)

#define DO_VQDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 0, S)
#define DO_VQDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 0, S)
#define DO_VQDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 0, S)
#define DO_VQRDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 1, S)
#define DO_VQRDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 1, S)
#define DO_VQRDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 1, S)
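/*
 * The two families differ only in which operand is passed to
 * do_vqdmlah_*() as the addend: for *MLAH the accumulator D is the
 * addend and the scalar M is a multiplicand, while for *MLASH the
 * scalar M is the addend and D is one of the multiplicands.
 */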
DO_2OP_SAT_ACC_SCALAR(vqdmlahb, 1, int8_t, DO_VQDMLAH_B)
DO_2OP_SAT_ACC_SCALAR(vqdmlahh, 2, int16_t, DO_VQDMLAH_H)
DO_2OP_SAT_ACC_SCALAR(vqdmlahw, 4, int32_t, DO_VQDMLAH_W)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahb, 1, int8_t, DO_VQRDMLAH_B)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahh, 2, int16_t, DO_VQRDMLAH_H)
DO_2OP_SAT_ACC_SCALAR(vqrdmlahw, 4, int32_t, DO_VQRDMLAH_W)

DO_2OP_SAT_ACC_SCALAR(vqdmlashb, 1, int8_t, DO_VQDMLASH_B)
DO_2OP_SAT_ACC_SCALAR(vqdmlashh, 2, int16_t, DO_VQDMLASH_H)
DO_2OP_SAT_ACC_SCALAR(vqdmlashw, 4, int32_t, DO_VQDMLASH_W)
DO_2OP_SAT_ACC_SCALAR(vqrdmlashb, 1, int8_t, DO_VQRDMLASH_B)
DO_2OP_SAT_ACC_SCALAR(vqrdmlashh, 2, int16_t, DO_VQRDMLASH_H)
DO_2OP_SAT_ACC_SCALAR(vqrdmlashw, 4, int32_t, DO_VQRDMLASH_W)
/* Vector by scalar plus vector */
#define DO_VMLA(D, N, M) ((N) * (M) + (D))

DO_2OP_ACC_SCALAR_U(vmla, DO_VMLA)

/* Vector by vector plus scalar */
#define DO_VMLAS(D, N, M) ((N) * (D) + (M))

DO_2OP_ACC_SCALAR_U(vmlas, DO_VMLAS)
1614 * Long saturating scalar ops. As with DO_2OP_L, TYPE and H are for the
1615 * input (smaller) type and LESIZE, LTYPE, LH for the output (long) type.
1616 * SATMASK specifies which bits of the predicate mask matter for determining
1617 * whether to propagate a saturation indication into FPSCR.QC -- for
1618 * the 16x16->32 case we must check only the bit corresponding to the T or B
1619 * half that we used, but for the 32x32->64 case we propagate if the mask
1620 * bit is set for either half.
1622 #define DO_2OP_SAT_SCALAR_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
1623 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
1629 uint16_t mask = mve_element_mask(env); \
1632 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
1634 LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], m, &sat); \
1635 mergemask(&d[H##LESIZE(le)], r, mask); \
1636 qc |= sat && (mask & SATMASK); \
1639 env->vfp.qc[0] = qc; \
1641 mve_advance_vpt(env); \
static inline int32_t do_qdmullh(int16_t n, int16_t m, bool *sat)
{
    int64_t r = ((int64_t)n * m) * 2;
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat);
}

static inline int64_t do_qdmullw(int32_t n, int32_t m, bool *sat)
{
    /* The multiply can't overflow, but the doubling might */
    int64_t r = (int64_t)n * m;
    if (r > INT64_MAX / 2) {
        *sat = true;
        return INT64_MAX;
    } else if (r < INT64_MIN / 2) {
        *sat = true;
        return INT64_MIN;
    } else {
        return r * 2;
    }
}

#define SATMASK16B 1
#define SATMASK16T (1 << 2)
#define SATMASK32 ((1 << 4) | 1)
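/*
 * For example, SATMASK16T is bit 2 of the predicate mask: a T-form
 * 16x16->32 op writes a result element covering mask bits 0..3, but
 * only bit 2 corresponds to the top-half input that was actually used.
 */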
DO_2OP_SAT_SCALAR_L(vqdmullb_scalarh, 0, 2, int16_t, 4, int32_t, \
                    do_qdmullh, SATMASK16B)
DO_2OP_SAT_SCALAR_L(vqdmullb_scalarw, 0, 4, int32_t, 8, int64_t, \
                    do_qdmullw, SATMASK32)
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarh, 1, 2, int16_t, 4, int32_t, \
                    do_qdmullh, SATMASK16T)
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarw, 1, 4, int32_t, 8, int64_t, \
                    do_qdmullw, SATMASK32)
1679 * Long saturating ops
1681 #define DO_2OP_SAT_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
1682 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
1686 TYPE *n = vn, *m = vm; \
1687 uint16_t mask = mve_element_mask(env); \
1690 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
1692 LTYPE op1 = n[H##ESIZE(le * 2 + TOP)]; \
1693 LTYPE op2 = m[H##ESIZE(le * 2 + TOP)]; \
1694 mergemask(&d[H##LESIZE(le)], FN(op1, op2, &sat), mask); \
1695 qc |= sat && (mask & SATMASK); \
1698 env->vfp.qc[0] = qc; \
1700 mve_advance_vpt(env); \
DO_2OP_SAT_L(vqdmullbh, 0, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16B)
DO_2OP_SAT_L(vqdmullbw, 0, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
DO_2OP_SAT_L(vqdmullth, 1, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16T)
DO_2OP_SAT_L(vqdmulltw, 1, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)

static inline uint32_t do_vbrsrh(uint32_t n, uint32_t m)

static inline uint32_t do_vbrsrw(uint32_t n, uint32_t m)

DO_2OP_SCALAR(vbrsrb, 1, uint8_t, do_vbrsrb)
DO_2OP_SCALAR(vbrsrh, 2, uint16_t, do_vbrsrh)
DO_2OP_SCALAR(vbrsrw, 4, uint32_t, do_vbrsrw)
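/*
 * VBRSR reverses the bottom "m" bits of each element (m comes from the
 * scalar operand) and zeroes the rest: e.g. with m == 8 a byte element
 * of 0b00000001 becomes 0b10000000.
 */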
1752 * Multiply add long dual accumulate ops.
1754 #define DO_LDAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \
1755 uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
1756 void *vm, uint64_t a) \
1758 uint16_t mask = mve_element_mask(env); \
1760 TYPE *n = vn, *m = vm; \
1761 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
1765 (int64_t)n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
1768 (int64_t)n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
1772 mve_advance_vpt(env); \
DO_LDAV(vmlaldavsh, 2, int16_t, false, +=, +=)
DO_LDAV(vmlaldavxsh, 2, int16_t, true, +=, +=)
DO_LDAV(vmlaldavsw, 4, int32_t, false, +=, +=)
DO_LDAV(vmlaldavxsw, 4, int32_t, true, +=, +=)

DO_LDAV(vmlaldavuh, 2, uint16_t, false, +=, +=)
DO_LDAV(vmlaldavuw, 4, uint32_t, false, +=, +=)

DO_LDAV(vmlsldavsh, 2, int16_t, false, +=, -=)
DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)
1790 * Multiply add dual accumulate ops
1792 #define DO_DAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \
1793 uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
1794 void *vm, uint32_t a) \
1796 uint16_t mask = mve_element_mask(env); \
1798 TYPE *n = vn, *m = vm; \
1799 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
1803 n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
1806 n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
1810 mve_advance_vpt(env); \
1814 #define DO_DAV_S(INSN, XCHG, EVENACC, ODDACC) \
1815 DO_DAV(INSN##b, 1, int8_t, XCHG, EVENACC, ODDACC) \
1816 DO_DAV(INSN##h, 2, int16_t, XCHG, EVENACC, ODDACC) \
1817 DO_DAV(INSN##w, 4, int32_t, XCHG, EVENACC, ODDACC)
1819 #define DO_DAV_U(INSN, XCHG, EVENACC, ODDACC) \
1820 DO_DAV(INSN##b, 1, uint8_t, XCHG, EVENACC, ODDACC) \
1821 DO_DAV(INSN##h, 2, uint16_t, XCHG, EVENACC, ODDACC) \
1822 DO_DAV(INSN##w, 4, uint32_t, XCHG, EVENACC, ODDACC)
DO_DAV_S(vmladavs, false, +=, +=)
DO_DAV_U(vmladavu, false, +=, +=)
DO_DAV_S(vmlsdav, false, +=, -=)
DO_DAV_S(vmladavsx, true, +=, +=)
DO_DAV_S(vmlsdavx, true, +=, -=)
/*
 * Rounding multiply add long dual accumulate high. In the pseudocode
 * this is implemented with a 72-bit internal accumulator value of which
 * the top 64 bits are returned. We optimize this to avoid having to
 * use 128-bit arithmetic -- we can do this because the 74-bit accumulator
 * is squashed back into 64-bits after each beat.
 */
#define DO_LDAVH(OP, TYPE, LTYPE, XCHG, SUB) \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                    void *vm, uint64_t a) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *n = vn, *m = vm; \
        for (e = 0; e < 16 / 4; e++, mask >>= 4) { \
            if (mask & 1) { \
                LTYPE mul; \
                if (e & 1) { \
                    mul = (LTYPE)n[H4(e - 1 * XCHG)] * m[H4(e)]; \
                    if (SUB) { \
                        mul = -mul; \
                    } \
                } else { \
                    mul = (LTYPE)n[H4(e + 1 * XCHG)] * m[H4(e)]; \
                } \
                mul = (mul >> 8) + ((mul >> 7) & 1); \
                a += mul; \
            } \
        } \
        mve_advance_vpt(env); \
        return a; \
    }

DO_LDAVH(vrmlaldavhsw, int32_t, int64_t, false, false)
DO_LDAVH(vrmlaldavhxsw, int32_t, int64_t, true, false)

DO_LDAVH(vrmlaldavhuw, uint32_t, uint64_t, false, false)

DO_LDAVH(vrmlsldavhsw, int32_t, int64_t, false, true)
DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true)
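/*
 * The per-beat squash above is the line
 *   mul = (mul >> 8) + ((mul >> 7) & 1);
 * each 64-bit product is reduced by the 8 fractional bits the wider
 * pseudocode accumulator would have kept, rounding to nearest, before
 * being added to the 64-bit running total.
 */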
1871 /* Vector add across vector */
1872 #define DO_VADDV(OP, ESIZE, TYPE) \
1873 uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
1876 uint16_t mask = mve_element_mask(env); \
1879 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
1881 ra += m[H##ESIZE(e)]; \
1884 mve_advance_vpt(env); \
DO_VADDV(vaddvsb, 1, int8_t)
DO_VADDV(vaddvsh, 2, int16_t)
DO_VADDV(vaddvsw, 4, int32_t)
DO_VADDV(vaddvub, 1, uint8_t)
DO_VADDV(vaddvuh, 2, uint16_t)
DO_VADDV(vaddvuw, 4, uint32_t)
1896 * Vector max/min across vector. Unlike VADDV, we must
1897 * read ra as the element size, not its full width.
1898 * We work with int64_t internally for simplicity.
#define DO_VMAXMINV(OP, ESIZE, TYPE, RATYPE, FN) \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint32_t ra_in) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *m = vm; \
        int64_t ra = (RATYPE)ra_in; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if (mask & 1) { \
                ra = FN(ra, m[H##ESIZE(e)]); \
            } \
        } \
        mve_advance_vpt(env); \
        return ra; \
    }

#define DO_VMAXMINV_U(INSN, FN) \
    DO_VMAXMINV(INSN##b, 1, uint8_t, uint8_t, FN) \
    DO_VMAXMINV(INSN##h, 2, uint16_t, uint16_t, FN) \
    DO_VMAXMINV(INSN##w, 4, uint32_t, uint32_t, FN)
#define DO_VMAXMINV_S(INSN, FN) \
    DO_VMAXMINV(INSN##b, 1, int8_t, int8_t, FN) \
    DO_VMAXMINV(INSN##h, 2, int16_t, int16_t, FN) \
    DO_VMAXMINV(INSN##w, 4, int32_t, int32_t, FN)
/*
 * Helpers for max and min of absolute values across vector:
 * note that we only take the absolute value of 'm', not 'n'
 */
static int64_t do_maxa(int64_t n, int64_t m)
{
    if (m < 0) {
        m = -m;
    }
    return MAX(n, m);
}

static int64_t do_mina(int64_t n, int64_t m)
{
    if (m < 0) {
        m = -m;
    }
    return MIN(n, m);
}

DO_VMAXMINV_S(vmaxvs, DO_MAX)
DO_VMAXMINV_U(vmaxvu, DO_MAX)
DO_VMAXMINV_S(vminvs, DO_MIN)
DO_VMAXMINV_U(vminvu, DO_MIN)
/*
 * VMAXAV, VMINAV treat the general purpose input as unsigned
 * and the vector elements as signed.
 */
DO_VMAXMINV(vmaxavb, 1, int8_t, uint8_t, do_maxa)
DO_VMAXMINV(vmaxavh, 2, int16_t, uint16_t, do_maxa)
DO_VMAXMINV(vmaxavw, 4, int32_t, uint32_t, do_maxa)
DO_VMAXMINV(vminavb, 1, int8_t, uint8_t, do_mina)
DO_VMAXMINV(vminavh, 2, int16_t, uint16_t, do_mina)
DO_VMAXMINV(vminavw, 4, int32_t, uint32_t, do_mina)
#define DO_VABAV(OP, ESIZE, TYPE) \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                    void *vm, uint32_t ra) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *m = vm, *n = vn; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if (mask & 1) { \
                int64_t n0 = n[H##ESIZE(e)]; \
                int64_t m0 = m[H##ESIZE(e)]; \
                uint32_t r = n0 >= m0 ? (n0 - m0) : (m0 - n0); \
                ra += r; \
            } \
        } \
        mve_advance_vpt(env); \
        return ra; \
    }

DO_VABAV(vabavsb, 1, int8_t)
DO_VABAV(vabavsh, 2, int16_t)
DO_VABAV(vabavsw, 4, int32_t)
DO_VABAV(vabavub, 1, uint8_t)
DO_VABAV(vabavuh, 2, uint16_t)
DO_VABAV(vabavuw, 4, uint32_t)
#define DO_VADDLV(OP, TYPE, LTYPE) \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint64_t ra) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *m = vm; \
        for (e = 0; e < 16 / 4; e++, mask >>= 4) { \
            if (mask & 1) { \
                ra += (LTYPE)m[H4(e)]; \
            } \
        } \
        mve_advance_vpt(env); \
        return ra; \
    }

DO_VADDLV(vaddlv_s, int32_t, int64_t)
DO_VADDLV(vaddlv_u, uint32_t, uint64_t)
/* Shifts by immediate */
#define DO_2SHIFT(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
                                void *vm, uint32_t shift) \
    { \
        TYPE *d = vd, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], \
                      FN(m[H##ESIZE(e)], shift), mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_2SHIFT_SAT(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
                                void *vm, uint32_t shift) \
    { \
        TYPE *d = vd, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            mergemask(&d[H##ESIZE(e)], \
                      FN(m[H##ESIZE(e)], shift, &sat), mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }
/* provide unsigned 2-op shift helpers for all sizes */
#define DO_2SHIFT_U(OP, FN) \
    DO_2SHIFT(OP##b, 1, uint8_t, FN) \
    DO_2SHIFT(OP##h, 2, uint16_t, FN) \
    DO_2SHIFT(OP##w, 4, uint32_t, FN)
#define DO_2SHIFT_S(OP, FN) \
    DO_2SHIFT(OP##b, 1, int8_t, FN) \
    DO_2SHIFT(OP##h, 2, int16_t, FN) \
    DO_2SHIFT(OP##w, 4, int32_t, FN)

#define DO_2SHIFT_SAT_U(OP, FN) \
    DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN) \
    DO_2SHIFT_SAT(OP##h, 2, uint16_t, FN) \
    DO_2SHIFT_SAT(OP##w, 4, uint32_t, FN)
#define DO_2SHIFT_SAT_S(OP, FN) \
    DO_2SHIFT_SAT(OP##b, 1, int8_t, FN) \
    DO_2SHIFT_SAT(OP##h, 2, int16_t, FN) \
    DO_2SHIFT_SAT(OP##w, 4, int32_t, FN)
DO_2SHIFT_U(vshli_u, DO_VSHLU)
DO_2SHIFT_S(vshli_s, DO_VSHLS)
DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP)
DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
DO_2SHIFT_SAT_U(vqrshli_u, DO_UQRSHL_OP)
DO_2SHIFT_SAT_S(vqrshli_s, DO_SQRSHL_OP)
/* Shift-and-insert; we always work with 64 bits at a time */
#define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
                                void *vm, uint32_t shift) \
    { \
        uint64_t *d = vd, *m = vm; \
        uint16_t mask; \
        uint64_t shiftmask; \
        unsigned e; \
        if (shift == ESIZE * 8) { \
            /* \
             * Only VSRI can shift by <dt>; it should mean "don't \
             * update the destination". The generic logic can't handle \
             * this because it would try to shift by an out-of-range \
             * amount, so special case it here. \
             */ \
            goto done_shift; \
        } \
        assert(shift < ESIZE * 8); \
        mask = mve_element_mask(env); \
        /* ESIZE / 2 gives the MO_* value if ESIZE is in [1,2,4] */ \
        shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift)); \
        for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
            uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) | \
                         (d[H8(e)] & ~shiftmask); \
            mergemask(&d[H8(e)], r, mask); \
        } \
done_shift: \
        mve_advance_vpt(env); \
    }
#define DO_SHL(N, SHIFT) ((N) << (SHIFT))
#define DO_SHR(N, SHIFT) ((N) >> (SHIFT))
#define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT))
#define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT))
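/*
 * Worked example (illustrative): for a byte-wise VSRI with shift == 3,
 * SHR_MASK(8, 3) is 0x1f, dup_const() replicates it to 0x1f1f1f1f1f1f1f1f,
 * and each result byte takes its low 5 bits from (m >> 3) while keeping
 * its top 3 bits from the existing destination.
 */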
DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK)
DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK)
DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK)
DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)
/*
 * Long shifts taking half-sized inputs from top or bottom of the input
 * vector and producing a double-width result. ESIZE, TYPE are for
 * the input, and LESIZE, LTYPE for the output.
 * Unlike the normal shift helpers, we do not handle negative shift counts,
 * because the long shift is strictly left-only.
 */
#define DO_VSHLL(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
                                void *vm, uint32_t shift) \
    { \
        LTYPE *d = vd; \
        TYPE *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        assert(shift <= 16); \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            LTYPE r = (LTYPE)m[H##ESIZE(le * 2 + TOP)] << shift; \
            mergemask(&d[H##LESIZE(le)], r, mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VSHLL_ALL(OP, TOP) \
    DO_VSHLL(OP##sb, TOP, 1, int8_t, 2, int16_t) \
    DO_VSHLL(OP##ub, TOP, 1, uint8_t, 2, uint16_t) \
    DO_VSHLL(OP##sh, TOP, 2, int16_t, 4, int32_t) \
    DO_VSHLL(OP##uh, TOP, 2, uint16_t, 4, uint32_t) \

DO_VSHLL_ALL(vshllb, false)
DO_VSHLL_ALL(vshllt, true)
/*
 * Narrowing right shifts, taking a double sized input, shifting it
 * and putting the result in either the top or bottom half of the output.
 * ESIZE, TYPE are the output, and LESIZE, LTYPE the input.
 */
#define DO_VSHRN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
                                void *vm, uint32_t shift) \
    { \
        LTYPE *m = vm; \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        mask >>= ESIZE * TOP; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            TYPE r = FN(m[H##LESIZE(le)], shift); \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VSHRN_ALL(OP, FN) \
    DO_VSHRN(OP##bb, false, 1, uint8_t, 2, uint16_t, FN) \
    DO_VSHRN(OP##bh, false, 2, uint16_t, 4, uint32_t, FN) \
    DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN) \
    DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN)
static inline uint64_t do_urshr(uint64_t x, unsigned sh)
{
    if (likely(sh < 64)) {
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    } else if (sh == 64) {
        return x >> 63;
    } else {
        return 0;
    }
}
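/*
 * Worked example (illustrative): do_urshr(23, 3) computes
 * (23 >> 3) + ((23 >> 2) & 1) = 2 + 1 = 3, i.e. 23/8 = 2.875 rounded
 * to nearest with ties going upwards.
 */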
static inline int64_t do_srshr(int64_t x, unsigned sh)
{
    if (likely(sh < 64)) {
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    } else {
        /* Rounding the sign bit always produces 0. */
        return 0;
    }
}
DO_VSHRN_ALL(vshrn, DO_SHR)
DO_VSHRN_ALL(vrshrn, do_urshr)
static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max,
                                 bool *satp)
{
    if (val > max) {
        *satp = true;
        return max;
    } else if (val < min) {
        *satp = true;
        return min;
    } else {
        return val;
    }
}
/* Saturating narrowing right shifts */
#define DO_VSHRN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
                                void *vm, uint32_t shift) \
    { \
        LTYPE *m = vm; \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        bool qc = false; \
        mask >>= ESIZE * TOP; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false; \
            TYPE r = FN(m[H##LESIZE(le)], shift, &sat); \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VSHRN_SAT_UB(BOP, TOP, FN) \
    DO_VSHRN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \
    DO_VSHRN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)

#define DO_VSHRN_SAT_UH(BOP, TOP, FN) \
    DO_VSHRN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \
    DO_VSHRN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)

#define DO_VSHRN_SAT_SB(BOP, TOP, FN) \
    DO_VSHRN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \
    DO_VSHRN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)

#define DO_VSHRN_SAT_SH(BOP, TOP, FN) \
    DO_VSHRN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \
    DO_VSHRN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)
#define DO_SHRN_SB(N, M, SATP) \
    do_sat_bhs((int64_t)(N) >> (M), INT8_MIN, INT8_MAX, SATP)
#define DO_SHRN_UB(N, M, SATP) \
    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT8_MAX, SATP)
#define DO_SHRUN_B(N, M, SATP) \
    do_sat_bhs((int64_t)(N) >> (M), 0, UINT8_MAX, SATP)

#define DO_SHRN_SH(N, M, SATP) \
    do_sat_bhs((int64_t)(N) >> (M), INT16_MIN, INT16_MAX, SATP)
#define DO_SHRN_UH(N, M, SATP) \
    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT16_MAX, SATP)
#define DO_SHRUN_H(N, M, SATP) \
    do_sat_bhs((int64_t)(N) >> (M), 0, UINT16_MAX, SATP)
#define DO_RSHRN_SB(N, M, SATP) \
    do_sat_bhs(do_srshr(N, M), INT8_MIN, INT8_MAX, SATP)
#define DO_RSHRN_UB(N, M, SATP) \
    do_sat_bhs(do_urshr(N, M), 0, UINT8_MAX, SATP)
#define DO_RSHRUN_B(N, M, SATP) \
    do_sat_bhs(do_srshr(N, M), 0, UINT8_MAX, SATP)

#define DO_RSHRN_SH(N, M, SATP) \
    do_sat_bhs(do_srshr(N, M), INT16_MIN, INT16_MAX, SATP)
#define DO_RSHRN_UH(N, M, SATP) \
    do_sat_bhs(do_urshr(N, M), 0, UINT16_MAX, SATP)
#define DO_RSHRUN_H(N, M, SATP) \
    do_sat_bhs(do_srshr(N, M), 0, UINT16_MAX, SATP)
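/*
 * Worked example (illustrative): DO_RSHRN_UB(0x1ff, 1, &sat) first
 * rounds, do_urshr(0x1ff, 1) = 0xff + 1 = 0x100, which is then
 * saturated to UINT8_MAX (0xff) with the saturation flag set.
 */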
DO_VSHRN_SAT_SB(vqshrnb_sb, vqshrnt_sb, DO_SHRN_SB)
DO_VSHRN_SAT_SH(vqshrnb_sh, vqshrnt_sh, DO_SHRN_SH)
DO_VSHRN_SAT_UB(vqshrnb_ub, vqshrnt_ub, DO_SHRN_UB)
DO_VSHRN_SAT_UH(vqshrnb_uh, vqshrnt_uh, DO_SHRN_UH)
DO_VSHRN_SAT_SB(vqshrunbb, vqshruntb, DO_SHRUN_B)
DO_VSHRN_SAT_SH(vqshrunbh, vqshrunth, DO_SHRUN_H)

DO_VSHRN_SAT_SB(vqrshrnb_sb, vqrshrnt_sb, DO_RSHRN_SB)
DO_VSHRN_SAT_SH(vqrshrnb_sh, vqrshrnt_sh, DO_RSHRN_SH)
DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)
#define DO_VMOVN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    { \
        LTYPE *m = vm; \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        mask >>= ESIZE * TOP; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], \
                      m[H##LESIZE(le)], mask); \
        } \
        mve_advance_vpt(env); \
    }
DO_VMOVN(vmovnbb, false, 1, uint8_t, 2, uint16_t)
DO_VMOVN(vmovnbh, false, 2, uint16_t, 4, uint32_t)
DO_VMOVN(vmovntb, true, 1, uint8_t, 2, uint16_t)
DO_VMOVN(vmovnth, true, 2, uint16_t, 4, uint32_t)
#define DO_VMOVN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    { \
        LTYPE *m = vm; \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        bool qc = false; \
        mask >>= ESIZE * TOP; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false; \
            TYPE r = FN(m[H##LESIZE(le)], &sat); \
            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VMOVN_SAT_UB(BOP, TOP, FN) \
    DO_VMOVN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \
    DO_VMOVN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)

#define DO_VMOVN_SAT_UH(BOP, TOP, FN) \
    DO_VMOVN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \
    DO_VMOVN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)

#define DO_VMOVN_SAT_SB(BOP, TOP, FN) \
    DO_VMOVN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \
    DO_VMOVN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)

#define DO_VMOVN_SAT_SH(BOP, TOP, FN) \
    DO_VMOVN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \
    DO_VMOVN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)
#define DO_VQMOVN_SB(N, SATP) \
    do_sat_bhs((int64_t)(N), INT8_MIN, INT8_MAX, SATP)
#define DO_VQMOVN_UB(N, SATP) \
    do_sat_bhs((uint64_t)(N), 0, UINT8_MAX, SATP)
#define DO_VQMOVUN_B(N, SATP) \
    do_sat_bhs((int64_t)(N), 0, UINT8_MAX, SATP)

#define DO_VQMOVN_SH(N, SATP) \
    do_sat_bhs((int64_t)(N), INT16_MIN, INT16_MAX, SATP)
#define DO_VQMOVN_UH(N, SATP) \
    do_sat_bhs((uint64_t)(N), 0, UINT16_MAX, SATP)
#define DO_VQMOVUN_H(N, SATP) \
    do_sat_bhs((int64_t)(N), 0, UINT16_MAX, SATP)
DO_VMOVN_SAT_SB(vqmovnbsb, vqmovntsb, DO_VQMOVN_SB)
DO_VMOVN_SAT_SH(vqmovnbsh, vqmovntsh, DO_VQMOVN_SH)
DO_VMOVN_SAT_UB(vqmovnbub, vqmovntub, DO_VQMOVN_UB)
DO_VMOVN_SAT_UH(vqmovnbuh, vqmovntuh, DO_VQMOVN_UH)
DO_VMOVN_SAT_SB(vqmovunbb, vqmovuntb, DO_VQMOVUN_B)
DO_VMOVN_SAT_SH(vqmovunbh, vqmovunth, DO_VQMOVUN_H)
uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
                           uint32_t shift)
{
    uint32_t *d = vd;
    uint16_t mask = mve_element_mask(env);
    unsigned e;
    uint32_t r;
    /*
     * For each 32-bit element, we shift it left, bringing in the
     * low 'shift' bits of rdm at the bottom. Bits shifted out at
     * the top become the new rdm, if the predicate mask permits.
     * The final rdm value is returned to update the register.
     * shift == 0 here means "shift by 32 bits".
     */
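    /*
     * Worked example (illustrative): with shift == 4 and an active lane,
     * one element computes r = (d[e] << 4) | (rdm & 0xf), and the new rdm
     * becomes d[e] >> 28, so the four bits shifted out of this element
     * feed the next element (or the final Rdm result).
     */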
    if (shift == 0) {
        for (e = 0; e < 16 / 4; e++, mask >>= 4) {
            r = rdm;
            if (mask & 1) {
                rdm = d[H4(e)];
            }
            mergemask(&d[H4(e)], r, mask);
        }
    } else {
        uint32_t shiftmask = MAKE_64BIT_MASK(0, shift);

        for (e = 0; e < 16 / 4; e++, mask >>= 4) {
            r = (d[H4(e)] << shift) | (rdm & shiftmask);
            if (mask & 1) {
                rdm = d[H4(e)] >> (32 - shift);
            }
            mergemask(&d[H4(e)], r, mask);
        }
    }
    mve_advance_vpt(env);
    return rdm;
}
uint64_t HELPER(mve_sshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, -(int8_t)shift, false, NULL);
}

uint64_t HELPER(mve_ushll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, false, NULL);
}

uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
}

uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, false, &env->QF);
}

uint64_t HELPER(mve_sqrshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl_d(n, -(int8_t)shift, true, &env->QF);
}

uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl_d(n, (int8_t)shift, true, &env->QF);
}
/* Operate on 64-bit values, but saturate at 48 bits */
static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift,
                                    bool round, uint32_t *sat)
{
    int64_t val, extval;

    if (shift <= -48) {
        /* Rounding the sign bit always produces 0. */
        if (round) {
            return 0;
        }
        return src >> 63;
    } else if (shift < 0) {
        if (round) {
            src >>= -shift - 1;
            val = (src >> 1) + (src & 1);
        } else {
            val = src >> -shift;
        }
        extval = sextract64(val, 0, 48);
        if (!sat || val == extval) {
            return extval;
        }
    } else if (shift < 48) {
        int64_t extval = sextract64(src << shift, 0, 48);
        if (!sat || src == (extval >> shift)) {
            return extval;
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    *sat = 1;
    return src >= 0 ? MAKE_64BIT_MASK(0, 47) : MAKE_64BIT_MASK(47, 17);
}
/* Operate on 64-bit values, but saturate at 48 bits */
static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift,
                                     bool round, uint32_t *sat)
{
    uint64_t val, extval;

    if (shift <= -(48 + round)) {
        return 0;
    } else if (shift < 0) {
        if (round) {
            val = src >> (-shift - 1);
            val = (val >> 1) + (val & 1);
        } else {
            val = src >> -shift;
        }
        extval = extract64(val, 0, 48);
        if (!sat || val == extval) {
            return extval;
        }
    } else if (shift < 48) {
        uint64_t extval = extract64(src << shift, 0, 48);
        if (!sat || src == (extval >> shift)) {
            return extval;
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    *sat = 1;
    return MAKE_64BIT_MASK(0, 48);
}
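/*
 * Worked example (illustrative): shifting 0x100000000 left by 16 gives
 * 2^48, which no longer fits in 48 bits, so do_uqrshl48_d() returns the
 * saturated value MAKE_64BIT_MASK(0, 48) = 0xffffffffffff and sets *sat.
 */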
uint64_t HELPER(mve_sqrshrl48)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_sqrshl48_d(n, -(int8_t)shift, true, &env->QF);
}

uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
}
uint32_t HELPER(mve_uqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_uqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
}

uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
}

uint32_t HELPER(mve_uqrshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_uqrshl_bhs(n, (int8_t)shift, 32, true, &env->QF);
}

uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF);
}
#define DO_VIDUP(OP, ESIZE, TYPE, FN) \
    uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd, \
                              uint32_t offset, uint32_t imm) \
    { \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], offset, mask); \
            offset = FN(offset, imm); \
        } \
        mve_advance_vpt(env); \
        return offset; \
    }

#define DO_VIWDUP(OP, ESIZE, TYPE, FN) \
    uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd, \
                              uint32_t offset, uint32_t wrap, \
                              uint32_t imm) \
    { \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], offset, mask); \
            offset = FN(offset, wrap, imm); \
        } \
        mve_advance_vpt(env); \
        return offset; \
    }

#define DO_VIDUP_ALL(OP, FN) \
    DO_VIDUP(OP##b, 1, int8_t, FN) \
    DO_VIDUP(OP##h, 2, int16_t, FN) \
    DO_VIDUP(OP##w, 4, int32_t, FN)

#define DO_VIWDUP_ALL(OP, FN) \
    DO_VIWDUP(OP##b, 1, int8_t, FN) \
    DO_VIWDUP(OP##h, 2, int16_t, FN) \
    DO_VIWDUP(OP##w, 4, int32_t, FN)
static uint32_t do_add_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
{
    offset += imm;
    if (offset == wrap) {
        offset = 0;
    }
    return offset;
}

static uint32_t do_sub_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
{
    if (offset == 0) {
        offset = wrap - imm;
    } else {
        offset -= imm;
    }
    return offset;
}
DO_VIDUP_ALL(vidup, DO_ADD)
DO_VIWDUP_ALL(viwdup, do_add_wrap)
DO_VIWDUP_ALL(vdwdup, do_sub_wrap)
/*
 * Vector comparison.
 * P0 bits for non-executed beats (where eci_mask is 0) are unchanged.
 * P0 bits for predicated lanes in executed beats (where mask is 0) are 0.
 * P0 bits otherwise are updated with the results of the comparisons.
 * We must also keep unchanged the MASK fields at the top of v7m.vpr.
 */
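/*
 * Worked example (illustrative): for a 32-bit compare each element owns
 * four P0 bits. If only elements 0 and 2 compare true, beatpred ends up
 * as 0x0f0f, and the mask/eci_mask handling below decides which of those
 * bits actually reach VPR.P0.
 */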
#define DO_VCMP(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm) \
        TYPE *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        uint16_t eci_mask = mve_eci_mask(env); \
        uint16_t beatpred = 0; \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \
        for (e = 0; e < 16 / ESIZE; e++) { \
            bool r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)]); \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask; \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \
                       (beatpred & eci_mask); \
        mve_advance_vpt(env); \

#define DO_VCMP_SCALAR(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
        uint16_t mask = mve_element_mask(env); \
        uint16_t eci_mask = mve_eci_mask(env); \
        uint16_t beatpred = 0; \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \
        for (e = 0; e < 16 / ESIZE; e++) { \
            bool r = FN(n[H##ESIZE(e)], (TYPE)rm); \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask; \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \
                       (beatpred & eci_mask); \
        mve_advance_vpt(env); \

#define DO_VCMP_S(OP, FN) \
    DO_VCMP(OP##b, 1, int8_t, FN) \
    DO_VCMP(OP##h, 2, int16_t, FN) \
    DO_VCMP(OP##w, 4, int32_t, FN) \
    DO_VCMP_SCALAR(OP##_scalarb, 1, int8_t, FN) \
    DO_VCMP_SCALAR(OP##_scalarh, 2, int16_t, FN) \
    DO_VCMP_SCALAR(OP##_scalarw, 4, int32_t, FN)

#define DO_VCMP_U(OP, FN) \
    DO_VCMP(OP##b, 1, uint8_t, FN) \
    DO_VCMP(OP##h, 2, uint16_t, FN) \
    DO_VCMP(OP##w, 4, uint32_t, FN) \
    DO_VCMP_SCALAR(OP##_scalarb, 1, uint8_t, FN) \
    DO_VCMP_SCALAR(OP##_scalarh, 2, uint16_t, FN) \
    DO_VCMP_SCALAR(OP##_scalarw, 4, uint32_t, FN)
#define DO_EQ(N, M) ((N) == (M))
#define DO_NE(N, M) ((N) != (M))
#define DO_GE(N, M) ((N) >= (M))
#define DO_LT(N, M) ((N) < (M))
#define DO_GT(N, M) ((N) > (M))
#define DO_LE(N, M) ((N) <= (M))
DO_VCMP_U(vcmpeq, DO_EQ)
DO_VCMP_U(vcmpne, DO_NE)
DO_VCMP_U(vcmpcs, DO_GE)
DO_VCMP_U(vcmphi, DO_GT)
DO_VCMP_S(vcmpge, DO_GE)
DO_VCMP_S(vcmplt, DO_LT)
DO_VCMP_S(vcmpgt, DO_GT)
DO_VCMP_S(vcmple, DO_LE)
void HELPER(mve_vpsel)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    /*
     * Qd[n] = VPR.P0[n] ? Qn[n] : Qm[n]
     * but note that whether bytes are written to Qd is still subject
     * to (all forms of) predication in the usual way.
     */
    uint64_t *d = vd, *n = vn, *m = vm;
    uint16_t mask = mve_element_mask(env);
    uint16_t p0 = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);
    unsigned e;

    for (e = 0; e < 16 / 8; e++, mask >>= 8, p0 >>= 8) {
        uint64_t r = m[H8(e)];
        mergemask(&r, n[H8(e)], p0);
        mergemask(&d[H8(e)], r, mask);
    }
    mve_advance_vpt(env);
}
void HELPER(mve_vpnot)(CPUARMState *env)
{
    /*
     * P0 bits for unexecuted beats (where eci_mask is 0) are unchanged.
     * P0 bits for predicated lanes in executed beats (where mask is 0) are 0.
     * P0 bits otherwise are inverted.
     * (This is the same logic as VCMP.)
     * This insn is itself subject to predication and to beat-wise execution,
     * and after it executes VPT state advances in the usual way.
     */
    uint16_t mask = mve_element_mask(env);
    uint16_t eci_mask = mve_eci_mask(env);
    uint16_t beatpred = ~env->v7m.vpr & mask;

    env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (beatpred & eci_mask);
    mve_advance_vpt(env);
}
/*
 * VCTP: P0 unexecuted bits unchanged, predicated bits zeroed,
 * otherwise set according to value of Rn. The calculation of
 * newmask here works in the same way as the calculation of the
 * ltpmask in mve_element_mask(), but we have pre-calculated
 * the masklen in the generated code.
 */
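/*
 * Worked example (illustrative, assuming the generated code passes
 * masklen as Rn multiplied by the element size in bytes, capped so that
 * masklen <= 16): for a VCTP.16 with Rn == 3 the helper receives
 * masklen == 6, so newmask becomes 0x003f, i.e. the predicate bits of
 * the first three 16-bit elements.
 */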
void HELPER(mve_vctp)(CPUARMState *env, uint32_t masklen)
{
    uint16_t mask = mve_element_mask(env);
    uint16_t eci_mask = mve_eci_mask(env);
    uint16_t newmask;

    assert(masklen <= 16);
    newmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0;
    newmask &= mask;
    env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (newmask & eci_mask);
    mve_advance_vpt(env);
}
#define DO_1OP_SAT(OP, ESIZE, TYPE, FN) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    { \
        TYPE *d = vd, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)], &sat), mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }
#define DO_VQABS_B(N, SATP) \
    do_sat_bhs(DO_ABS((int64_t)N), INT8_MIN, INT8_MAX, SATP)
#define DO_VQABS_H(N, SATP) \
    do_sat_bhs(DO_ABS((int64_t)N), INT16_MIN, INT16_MAX, SATP)
#define DO_VQABS_W(N, SATP) \
    do_sat_bhs(DO_ABS((int64_t)N), INT32_MIN, INT32_MAX, SATP)

#define DO_VQNEG_B(N, SATP) do_sat_bhs(-(int64_t)N, INT8_MIN, INT8_MAX, SATP)
#define DO_VQNEG_H(N, SATP) do_sat_bhs(-(int64_t)N, INT16_MIN, INT16_MAX, SATP)
#define DO_VQNEG_W(N, SATP) do_sat_bhs(-(int64_t)N, INT32_MIN, INT32_MAX, SATP)
DO_1OP_SAT(vqabsb, 1, int8_t, DO_VQABS_B)
DO_1OP_SAT(vqabsh, 2, int16_t, DO_VQABS_H)
DO_1OP_SAT(vqabsw, 4, int32_t, DO_VQABS_W)

DO_1OP_SAT(vqnegb, 1, int8_t, DO_VQNEG_B)
DO_1OP_SAT(vqnegh, 2, int16_t, DO_VQNEG_H)
DO_1OP_SAT(vqnegw, 4, int32_t, DO_VQNEG_W)
/*
 * VMAXA, VMINA: vd is unsigned; vm is signed, and we take its
 * absolute value; we then do an unsigned comparison.
 */
#define DO_VMAXMINA(OP, ESIZE, STYPE, UTYPE, FN) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    { \
        UTYPE *d = vd; \
        STYPE *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            UTYPE r = DO_ABS(m[H##ESIZE(e)]); \
            r = FN(d[H##ESIZE(e)], r); \
            mergemask(&d[H##ESIZE(e)], r, mask); \
        } \
        mve_advance_vpt(env); \
    }
DO_VMAXMINA(vmaxab, 1, int8_t, uint8_t, DO_MAX)
DO_VMAXMINA(vmaxah, 2, int16_t, uint16_t, DO_MAX)
DO_VMAXMINA(vmaxaw, 4, int32_t, uint32_t, DO_MAX)
DO_VMAXMINA(vminab, 1, int8_t, uint8_t, DO_MIN)
DO_VMAXMINA(vminah, 2, int16_t, uint16_t, DO_MIN)
DO_VMAXMINA(vminaw, 4, int32_t, uint32_t, DO_MIN)
/*
 * 2-operand floating point. Note that if an element is partially
 * predicated we must do the FP operation to update the non-predicated
 * bytes, but we must be careful to avoid updating the FP exception
 * state unless byte 0 of the element was unpredicated.
 */
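/*
 * Worked example (illustrative): a float16 element owns two predicate
 * bits. If they are 0b10, only the top byte of the result is written and
 * byte 0 is predicated, so the operation is done on a scratch copy of the
 * float_status and the cumulative exception flags stay untouched.
 */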
#define DO_2OP_FP(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, \
                                void *vd, void *vn, void *vm) \
        TYPE *d = vd, *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        float_status *fpst; \
        float_status scratch_fpst; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
                &env->vfp.standard_fp_status; \
            if (!(mask & 1)) { \
                /* We need the result but without updating flags */ \
                scratch_fpst = *fpst; \
                fpst = &scratch_fpst; \
            r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], fpst); \
            mergemask(&d[H##ESIZE(e)], r, mask); \
        mve_advance_vpt(env); \
#define DO_2OP_FP_ALL(OP, FN) \
    DO_2OP_FP(OP##h, 2, float16, float16_##FN) \
    DO_2OP_FP(OP##s, 4, float32, float32_##FN)

DO_2OP_FP_ALL(vfadd, add)
DO_2OP_FP_ALL(vfsub, sub)
DO_2OP_FP_ALL(vfmul, mul)
static inline float16 float16_abd(float16 a, float16 b, float_status *s)
{
    return float16_abs(float16_sub(a, b, s));
}

static inline float32 float32_abd(float32 a, float32 b, float_status *s)
{
    return float32_abs(float32_sub(a, b, s));
}
DO_2OP_FP_ALL(vfabd, abd)
DO_2OP_FP_ALL(vmaxnm, maxnum)
DO_2OP_FP_ALL(vminnm, minnum)
static inline float16 float16_maxnuma(float16 a, float16 b, float_status *s)
{
    return float16_maxnum(float16_abs(a), float16_abs(b), s);
}

static inline float32 float32_maxnuma(float32 a, float32 b, float_status *s)
{
    return float32_maxnum(float32_abs(a), float32_abs(b), s);
}

static inline float16 float16_minnuma(float16 a, float16 b, float_status *s)
{
    return float16_minnum(float16_abs(a), float16_abs(b), s);
}

static inline float32 float32_minnuma(float32 a, float32 b, float_status *s)
{
    return float32_minnum(float32_abs(a), float32_abs(b), s);
}
DO_2OP_FP_ALL(vmaxnma, maxnuma)
DO_2OP_FP_ALL(vminnma, minnuma)
#define DO_VCADD_FP(OP, ESIZE, TYPE, FN0, FN1) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, \
                                void *vd, void *vn, void *vm) \
        TYPE *d = vd, *n = vn, *m = vm; \
        TYPE r[16 / ESIZE]; \
        uint16_t tm, mask = mve_element_mask(env); \
        float_status *fpst; \
        float_status scratch_fpst; \
        /* Calculate all results first to avoid overwriting inputs */ \
        for (e = 0, tm = mask; e < 16 / ESIZE; e++, tm >>= ESIZE) { \
            if ((tm & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
                &env->vfp.standard_fp_status; \
                /* We need the result but without updating flags */ \
                scratch_fpst = *fpst; \
                fpst = &scratch_fpst; \
                r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)], fpst); \
                r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)], fpst); \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], r[e], mask); \
        mve_advance_vpt(env); \
DO_VCADD_FP(vfcadd90h, 2, float16, float16_sub, float16_add)
DO_VCADD_FP(vfcadd90s, 4, float32, float32_sub, float32_add)
DO_VCADD_FP(vfcadd270h, 2, float16, float16_add, float16_sub)
DO_VCADD_FP(vfcadd270s, 4, float32, float32_add, float32_sub)
#define DO_VFMA(OP, ESIZE, TYPE, CHS) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, \
                                void *vd, void *vn, void *vm) \
        TYPE *d = vd, *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        float_status *fpst; \
        float_status scratch_fpst; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
                &env->vfp.standard_fp_status; \
            if (!(mask & 1)) { \
                /* We need the result but without updating flags */ \
                scratch_fpst = *fpst; \
                fpst = &scratch_fpst; \
            r = n[H##ESIZE(e)]; \
                r = TYPE##_chs(r); \
            r = TYPE##_muladd(r, m[H##ESIZE(e)], d[H##ESIZE(e)], \
            mergemask(&d[H##ESIZE(e)], r, mask); \
        mve_advance_vpt(env); \
DO_VFMA(vfmah, 2, float16, false)
DO_VFMA(vfmas, 4, float32, false)
DO_VFMA(vfmsh, 2, float16, true)
DO_VFMA(vfmss, 4, float32, true)
#define DO_VCMLA(OP, ESIZE, TYPE, ROT, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, \
                                void *vd, void *vn, void *vm) \
        TYPE *d = vd, *n = vn, *m = vm; \
        TYPE r0, r1, e1, e2, e3, e4; \
        uint16_t mask = mve_element_mask(env); \
        float_status *fpst0, *fpst1; \
        float_status scratch_fpst; \
        /* We loop through pairs of elements at a time */ \
        for (e = 0; e < 16 / ESIZE; e += 2, mask >>= ESIZE * 2) { \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE * 2)) == 0) { \
            fpst0 = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
                &env->vfp.standard_fp_status; \
            if (!(mask & 1)) { \
                scratch_fpst = *fpst0; \
                fpst0 = &scratch_fpst; \
            if (!(mask & (1 << ESIZE))) { \
                scratch_fpst = *fpst1; \
                fpst1 = &scratch_fpst; \
                e1 = m[H##ESIZE(e)]; \
                e2 = n[H##ESIZE(e)]; \
                e3 = m[H##ESIZE(e + 1)]; \
                e4 = n[H##ESIZE(e)]; \
                e1 = TYPE##_chs(m[H##ESIZE(e + 1)]); \
                e2 = n[H##ESIZE(e + 1)]; \
                e3 = m[H##ESIZE(e)]; \
                e4 = n[H##ESIZE(e + 1)]; \
                e1 = TYPE##_chs(m[H##ESIZE(e)]); \
                e2 = n[H##ESIZE(e)]; \
                e3 = TYPE##_chs(m[H##ESIZE(e + 1)]); \
                e4 = n[H##ESIZE(e)]; \
                e1 = m[H##ESIZE(e + 1)]; \
                e2 = n[H##ESIZE(e + 1)]; \
                e3 = TYPE##_chs(m[H##ESIZE(e)]); \
                e4 = n[H##ESIZE(e + 1)]; \
                g_assert_not_reached(); \
            r0 = FN(e2, e1, d[H##ESIZE(e)], fpst0); \
            r1 = FN(e4, e3, d[H##ESIZE(e + 1)], fpst1); \
            mergemask(&d[H##ESIZE(e)], r0, mask); \
            mergemask(&d[H##ESIZE(e + 1)], r1, mask >> ESIZE); \
        mve_advance_vpt(env); \
#define DO_VCMULH(N, M, D, S) float16_mul(N, M, S)
#define DO_VCMULS(N, M, D, S) float32_mul(N, M, S)

#define DO_VCMLAH(N, M, D, S) float16_muladd(N, M, D, 0, S)
#define DO_VCMLAS(N, M, D, S) float32_muladd(N, M, D, 0, S)
DO_VCMLA(vcmul0h, 2, float16, 0, DO_VCMULH)
DO_VCMLA(vcmul0s, 4, float32, 0, DO_VCMULS)
DO_VCMLA(vcmul90h, 2, float16, 1, DO_VCMULH)
DO_VCMLA(vcmul90s, 4, float32, 1, DO_VCMULS)
DO_VCMLA(vcmul180h, 2, float16, 2, DO_VCMULH)
DO_VCMLA(vcmul180s, 4, float32, 2, DO_VCMULS)
DO_VCMLA(vcmul270h, 2, float16, 3, DO_VCMULH)
DO_VCMLA(vcmul270s, 4, float32, 3, DO_VCMULS)

DO_VCMLA(vcmla0h, 2, float16, 0, DO_VCMLAH)
DO_VCMLA(vcmla0s, 4, float32, 0, DO_VCMLAS)
DO_VCMLA(vcmla90h, 2, float16, 1, DO_VCMLAH)
DO_VCMLA(vcmla90s, 4, float32, 1, DO_VCMLAS)
DO_VCMLA(vcmla180h, 2, float16, 2, DO_VCMLAH)
DO_VCMLA(vcmla180s, 4, float32, 2, DO_VCMLAS)
DO_VCMLA(vcmla270h, 2, float16, 3, DO_VCMLAH)
DO_VCMLA(vcmla270s, 4, float32, 3, DO_VCMLAS)
#define DO_2OP_FP_SCALAR(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, \
                                void *vd, void *vn, uint32_t rm) \
        TYPE *d = vd, *n = vn; \
        uint16_t mask = mve_element_mask(env); \
        float_status *fpst; \
        float_status scratch_fpst; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
                &env->vfp.standard_fp_status; \
            if (!(mask & 1)) { \
                /* We need the result but without updating flags */ \
                scratch_fpst = *fpst; \
                fpst = &scratch_fpst; \
            r = FN(n[H##ESIZE(e)], m, fpst); \
            mergemask(&d[H##ESIZE(e)], r, mask); \
        mve_advance_vpt(env); \

#define DO_2OP_FP_SCALAR_ALL(OP, FN) \
    DO_2OP_FP_SCALAR(OP##h, 2, float16, float16_##FN) \
    DO_2OP_FP_SCALAR(OP##s, 4, float32, float32_##FN)
DO_2OP_FP_SCALAR_ALL(vfadd_scalar, add)
DO_2OP_FP_SCALAR_ALL(vfsub_scalar, sub)
DO_2OP_FP_SCALAR_ALL(vfmul_scalar, mul)
#define DO_2OP_FP_ACC_SCALAR(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, \
                                void *vd, void *vn, uint32_t rm) \
        TYPE *d = vd, *n = vn; \
        uint16_t mask = mve_element_mask(env); \
        float_status *fpst; \
        float_status scratch_fpst; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
                &env->vfp.standard_fp_status; \
            if (!(mask & 1)) { \
                /* We need the result but without updating flags */ \
                scratch_fpst = *fpst; \
                fpst = &scratch_fpst; \
            r = FN(n[H##ESIZE(e)], m, d[H##ESIZE(e)], 0, fpst); \
            mergemask(&d[H##ESIZE(e)], r, mask); \
        mve_advance_vpt(env); \

/* VFMAS is vector * vector + scalar, so swap op2 and op3 */
#define DO_VFMAS_SCALARH(N, M, D, F, S) float16_muladd(N, D, M, F, S)
#define DO_VFMAS_SCALARS(N, M, D, F, S) float32_muladd(N, D, M, F, S)

/* VFMA is vector * scalar + vector */
DO_2OP_FP_ACC_SCALAR(vfma_scalarh, 2, float16, float16_muladd)
DO_2OP_FP_ACC_SCALAR(vfma_scalars, 4, float32, float32_muladd)
DO_2OP_FP_ACC_SCALAR(vfmas_scalarh, 2, float16, DO_VFMAS_SCALARH)
DO_2OP_FP_ACC_SCALAR(vfmas_scalars, 4, float32, DO_VFMAS_SCALARS)
/* Floating point max/min across vector. */
#define DO_FP_VMAXMINV(OP, ESIZE, TYPE, ABS, FN) \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
        uint16_t mask = mve_element_mask(env); \
        TYPE ra = (TYPE)ra_in; \
        float_status *fpst = (ESIZE == 2) ? \
            &env->vfp.standard_fp_status_f16 : \
            &env->vfp.standard_fp_status; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
                TYPE v = m[H##ESIZE(e)]; \
                if (TYPE##_is_signaling_nan(ra, fpst)) { \
                    ra = TYPE##_silence_nan(ra, fpst); \
                    float_raise(float_flag_invalid, fpst); \
                if (TYPE##_is_signaling_nan(v, fpst)) { \
                    v = TYPE##_silence_nan(v, fpst); \
                    float_raise(float_flag_invalid, fpst); \
                    v = TYPE##_abs(v); \
                ra = FN(ra, v, fpst); \
        mve_advance_vpt(env); \
DO_FP_VMAXMINV(vmaxnmvh, 2, float16, false, float16_maxnum)
DO_FP_VMAXMINV(vmaxnmvs, 4, float32, false, float32_maxnum)
DO_FP_VMAXMINV(vminnmvh, 2, float16, false, float16_minnum)
DO_FP_VMAXMINV(vminnmvs, 4, float32, false, float32_minnum)
DO_FP_VMAXMINV(vmaxnmavh, 2, float16, true, float16_maxnum)
DO_FP_VMAXMINV(vmaxnmavs, 4, float32, true, float32_maxnum)
DO_FP_VMAXMINV(vminnmavh, 2, float16, true, float16_minnum)
DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum)
/* FP compares; note that all comparisons signal InvalidOp for QNaNs */
#define DO_VCMP_FP(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm) \
        TYPE *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        uint16_t eci_mask = mve_eci_mask(env); \
        uint16_t beatpred = 0; \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \
        float_status *fpst; \
        float_status scratch_fpst; \
        for (e = 0; e < 16 / ESIZE; e++, emask <<= ESIZE) { \
            if ((mask & emask) == 0) { \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
                &env->vfp.standard_fp_status; \
            if (!(mask & (1 << (e * ESIZE)))) { \
                /* We need the result but without updating flags */ \
                scratch_fpst = *fpst; \
                fpst = &scratch_fpst; \
            r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], fpst); \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask; \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \
                       (beatpred & eci_mask); \
        mve_advance_vpt(env); \
#define DO_VCMP_FP_SCALAR(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
        uint16_t mask = mve_element_mask(env); \
        uint16_t eci_mask = mve_eci_mask(env); \
        uint16_t beatpred = 0; \
        uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \
        float_status *fpst; \
        float_status scratch_fpst; \
        for (e = 0; e < 16 / ESIZE; e++, emask <<= ESIZE) { \
            if ((mask & emask) == 0) { \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
                &env->vfp.standard_fp_status; \
            if (!(mask & (1 << (e * ESIZE)))) { \
                /* We need the result but without updating flags */ \
                scratch_fpst = *fpst; \
                fpst = &scratch_fpst; \
            r = FN(n[H##ESIZE(e)], (TYPE)rm, fpst); \
            /* Comparison sets 0/1 bits for each byte in the element */ \
            beatpred |= r * emask; \
        env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \
                       (beatpred & eci_mask); \
        mve_advance_vpt(env); \

#define DO_VCMP_FP_BOTH(VOP, SOP, ESIZE, TYPE, FN) \
    DO_VCMP_FP(VOP, ESIZE, TYPE, FN) \
    DO_VCMP_FP_SCALAR(SOP, ESIZE, TYPE, FN)
/*
 * Some care is needed here to get the correct result for the unordered case.
 * Architecturally EQ, GE and GT are defined to be false for unordered, but
 * the NE, LT and LE comparisons are defined as simple logical inverses of
 * EQ, GE and GT and so they must return true for unordered. The softfloat
 * comparison functions float*_{eq,le,lt} all return false for unordered.
 */
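/*
 * Worked example (illustrative): if either operand is a NaN,
 * DO_GE16(X, Y, s) = float16_le(Y, X, s) returns false, so VCMPGE is
 * false for the unordered case as required, while VCMPLT uses !DO_GE16
 * and therefore correctly returns true.
 */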
#define DO_GE16(X, Y, S) float16_le(Y, X, S)
#define DO_GE32(X, Y, S) float32_le(Y, X, S)
#define DO_GT16(X, Y, S) float16_lt(Y, X, S)
#define DO_GT32(X, Y, S) float32_lt(Y, X, S)
DO_VCMP_FP_BOTH(vfcmpeqh, vfcmpeq_scalarh, 2, float16, float16_eq)
DO_VCMP_FP_BOTH(vfcmpeqs, vfcmpeq_scalars, 4, float32, float32_eq)

DO_VCMP_FP_BOTH(vfcmpneh, vfcmpne_scalarh, 2, float16, !float16_eq)
DO_VCMP_FP_BOTH(vfcmpnes, vfcmpne_scalars, 4, float32, !float32_eq)

DO_VCMP_FP_BOTH(vfcmpgeh, vfcmpge_scalarh, 2, float16, DO_GE16)
DO_VCMP_FP_BOTH(vfcmpges, vfcmpge_scalars, 4, float32, DO_GE32)

DO_VCMP_FP_BOTH(vfcmplth, vfcmplt_scalarh, 2, float16, !DO_GE16)
DO_VCMP_FP_BOTH(vfcmplts, vfcmplt_scalars, 4, float32, !DO_GE32)

DO_VCMP_FP_BOTH(vfcmpgth, vfcmpgt_scalarh, 2, float16, DO_GT16)
DO_VCMP_FP_BOTH(vfcmpgts, vfcmpgt_scalars, 4, float32, DO_GT32)

DO_VCMP_FP_BOTH(vfcmpleh, vfcmple_scalarh, 2, float16, !DO_GT16)
DO_VCMP_FP_BOTH(vfcmples, vfcmple_scalars, 4, float32, !DO_GT32)
#define DO_VCVT_FIXED(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vm, \
        TYPE *d = vd, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        float_status *fpst; \
        float_status scratch_fpst; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
                &env->vfp.standard_fp_status; \
            if (!(mask & 1)) { \
                /* We need the result but without updating flags */ \
                scratch_fpst = *fpst; \
                fpst = &scratch_fpst; \
            r = FN(m[H##ESIZE(e)], shift, fpst); \
            mergemask(&d[H##ESIZE(e)], r, mask); \
        mve_advance_vpt(env); \
DO_VCVT_FIXED(vcvt_sh, 2, int16_t, helper_vfp_shtoh)
DO_VCVT_FIXED(vcvt_uh, 2, uint16_t, helper_vfp_uhtoh)
DO_VCVT_FIXED(vcvt_hs, 2, int16_t, helper_vfp_toshh_round_to_zero)
DO_VCVT_FIXED(vcvt_hu, 2, uint16_t, helper_vfp_touhh_round_to_zero)
DO_VCVT_FIXED(vcvt_sf, 4, int32_t, helper_vfp_sltos)
DO_VCVT_FIXED(vcvt_uf, 4, uint32_t, helper_vfp_ultos)
DO_VCVT_FIXED(vcvt_fs, 4, int32_t, helper_vfp_tosls_round_to_zero)
DO_VCVT_FIXED(vcvt_fu, 4, uint32_t, helper_vfp_touls_round_to_zero)
/* VCVT with specified rmode */
#define DO_VCVT_RMODE(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, \
                                void *vd, void *vm, uint32_t rmode) \
        TYPE *d = vd, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        float_status *fpst; \
        float_status scratch_fpst; \
        float_status *base_fpst = (ESIZE == 2) ? \
            &env->vfp.standard_fp_status_f16 : \
            &env->vfp.standard_fp_status; \
        uint32_t prev_rmode = get_float_rounding_mode(base_fpst); \
        set_float_rounding_mode(rmode, base_fpst); \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
            if (!(mask & 1)) { \
                /* We need the result but without updating flags */ \
                scratch_fpst = *fpst; \
                fpst = &scratch_fpst; \
            r = FN(m[H##ESIZE(e)], 0, fpst); \
            mergemask(&d[H##ESIZE(e)], r, mask); \
        set_float_rounding_mode(prev_rmode, base_fpst); \
        mve_advance_vpt(env); \
DO_VCVT_RMODE(vcvt_rm_sh, 2, uint16_t, helper_vfp_toshh)
DO_VCVT_RMODE(vcvt_rm_uh, 2, uint16_t, helper_vfp_touhh)
DO_VCVT_RMODE(vcvt_rm_ss, 4, uint32_t, helper_vfp_tosls)
DO_VCVT_RMODE(vcvt_rm_us, 4, uint32_t, helper_vfp_touls)

#define DO_VRINT_RM_H(M, F, S) helper_rinth(M, S)
#define DO_VRINT_RM_S(M, F, S) helper_rints(M, S)

DO_VCVT_RMODE(vrint_rm_h, 2, uint16_t, DO_VRINT_RM_H)
DO_VCVT_RMODE(vrint_rm_s, 4, uint32_t, DO_VRINT_RM_S)
/*
 * VCVT between halfprec and singleprec. As usual for halfprec
 * conversions, FZ16 is ignored and AHP is observed.
 */
static void do_vcvt_sh(CPUARMState *env, void *vd, void *vm, int top)
    uint16_t mask = mve_element_mask(env);
    bool ieee = !(env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_AHP);
    float_status scratch_fpst;
    float_status *base_fpst = &env->vfp.standard_fp_status;
    bool old_fz = get_flush_to_zero(base_fpst);
    set_flush_to_zero(false, base_fpst);
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        if ((mask & MAKE_64BIT_MASK(0, 4)) == 0) {
            /* We need the result but without updating flags */
            scratch_fpst = *fpst;
            fpst = &scratch_fpst;
        r = float32_to_float16(m[H4(e)], ieee, fpst);
        mergemask(&d[H2(e * 2 + top)], r, mask >> (top * 2));
    set_flush_to_zero(old_fz, base_fpst);
    mve_advance_vpt(env);
static void do_vcvt_hs(CPUARMState *env, void *vd, void *vm, int top)
    uint16_t mask = mve_element_mask(env);
    bool ieee = !(env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_AHP);
    float_status scratch_fpst;
    float_status *base_fpst = &env->vfp.standard_fp_status;
    bool old_fiz = get_flush_inputs_to_zero(base_fpst);
    set_flush_inputs_to_zero(false, base_fpst);
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        if ((mask & MAKE_64BIT_MASK(0, 4)) == 0) {
        if (!(mask & (1 << (top * 2)))) {
            /* We need the result but without updating flags */
            scratch_fpst = *fpst;
            fpst = &scratch_fpst;
        r = float16_to_float32(m[H2(e * 2 + top)], ieee, fpst);
        mergemask(&d[H4(e)], r, mask);
    set_flush_inputs_to_zero(old_fiz, base_fpst);
    mve_advance_vpt(env);
void HELPER(mve_vcvtb_sh)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_sh(env, vd, vm, 0);
}

void HELPER(mve_vcvtt_sh)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_sh(env, vd, vm, 1);
}

void HELPER(mve_vcvtb_hs)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_hs(env, vd, vm, 0);
}

void HELPER(mve_vcvtt_hs)(CPUARMState *env, void *vd, void *vm)
{
    do_vcvt_hs(env, vd, vm, 1);
}
#define DO_1OP_FP(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vm) \
        TYPE *d = vd, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        float_status *fpst; \
        float_status scratch_fpst; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
            fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
                &env->vfp.standard_fp_status; \
            if (!(mask & 1)) { \
                /* We need the result but without updating flags */ \
                scratch_fpst = *fpst; \
                fpst = &scratch_fpst; \
            r = FN(m[H##ESIZE(e)], fpst); \
            mergemask(&d[H##ESIZE(e)], r, mask); \
        mve_advance_vpt(env); \
DO_1OP_FP(vrintx_h, 2, float16, float16_round_to_int)
DO_1OP_FP(vrintx_s, 4, float32, float32_round_to_int)