/* target/hexagon/gen_tcg_hvx.h — TCG code generation for Hexagon HVX instructions */
/*
 *  Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
18 #ifndef HEXAGON_GEN_TCG_HVX_H
19 #define HEXAGON_GEN_TCG_HVX_H
/*
 * Histogram instructions
 *
 * Note that these instructions operate directly on the vector registers
 * and therefore happen after commit.
 *
 * The generate_<tag> function is called twice
 *     The first time is during the normal TCG generation
 *         ctx->pre_commit is true
 *         In the masked cases, we save the mask to the qtmp temporary
 *         Otherwise, there is nothing to do
 *     The second call is at the end of gen_commit_packet
 *         ctx->pre_commit is false
 *         Generate the call to the helper
 */
37 static inline void assert_vhist_tmp(DisasContext
*ctx
)
39 /* vhist instructions require exactly one .tmp to be defined */
40 g_assert(ctx
->tmp_vregs_idx
== 1);
/* Unmasked histogram: only the post-commit pass emits the helper call */
#define fGEN_TCG_V6_vhist(SHORTCODE) \
    if (!ctx->pre_commit) { \
        assert_vhist_tmp(ctx); \
        gen_helper_vhist(cpu_env); \
    }
/* Masked histogram: pre-commit saves the mask to qtmp, post-commit calls helper */
#define fGEN_TCG_V6_vhistq(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            assert_vhist_tmp(ctx); \
            gen_helper_vhistq(cpu_env); \
        } \
    } while (0)
#define fGEN_TCG_V6_vwhist256(SHORTCODE) \
    if (!ctx->pre_commit) { \
        assert_vhist_tmp(ctx); \
        gen_helper_vwhist256(cpu_env); \
    }
#define fGEN_TCG_V6_vwhist256q(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist256q(cpu_env); \
        } \
    } while (0)
#define fGEN_TCG_V6_vwhist256_sat(SHORTCODE) \
    if (!ctx->pre_commit) { \
        assert_vhist_tmp(ctx); \
        gen_helper_vwhist256_sat(cpu_env); \
    }
#define fGEN_TCG_V6_vwhist256q_sat(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist256q_sat(cpu_env); \
        } \
    } while (0)
#define fGEN_TCG_V6_vwhist128(SHORTCODE) \
    if (!ctx->pre_commit) { \
        assert_vhist_tmp(ctx); \
        gen_helper_vwhist128(cpu_env); \
    }
#define fGEN_TCG_V6_vwhist128q(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist128q(cpu_env); \
        } \
    } while (0)
/* The "m" variants pass the immediate uiV through to the helper */
#define fGEN_TCG_V6_vwhist128m(SHORTCODE) \
    if (!ctx->pre_commit) { \
        TCGv tcgv_uiV = tcg_constant_tl(uiV); \
        assert_vhist_tmp(ctx); \
        gen_helper_vwhist128m(cpu_env, tcgv_uiV); \
    }
#define fGEN_TCG_V6_vwhist128qm(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            TCGv tcgv_uiV = tcg_constant_tl(uiV); \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist128qm(cpu_env, tcgv_uiV); \
        } \
    } while (0)
/* Whole-vector register copy */
#define fGEN_TCG_V6_vassign(SHORTCODE) \
    tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))
/*
 * Vector conditional move: copy VuV to VdV when the LSB of PsV equals PRED,
 * otherwise mark the slot cancelled so the write is suppressed.
 */
#define fGEN_TCG_VEC_CMOV(PRED) \
    do { \
        TCGv lsb = tcg_temp_new(); \
        TCGLabel *false_label = gen_new_label(); \
        TCGLabel *end_label = gen_new_label(); \
        tcg_gen_andi_tl(lsb, PsV, 1); \
        tcg_gen_brcondi_tl(TCG_COND_NE, lsb, PRED, false_label); \
        tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \
                         sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_br(end_label); \
        gen_set_label(false_label); \
        tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \
                       1 << insn->slot); \
        gen_set_label(end_label); \
    } while (0)

/* Vector conditional move (true) */
#define fGEN_TCG_V6_vcmov(SHORTCODE) \
    fGEN_TCG_VEC_CMOV(1)

/* Vector conditional move (false) */
#define fGEN_TCG_V6_vncmov(SHORTCODE) \
    fGEN_TCG_VEC_CMOV(0)
/* Vector add - various forms (parameter was corrupted to SHORTCYDE in two spots) */
#define fGEN_TCG_V6_vaddb(SHORTCODE) \
    tcg_gen_gvec_add(MO_8, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vaddh(SHORTCODE) \
    tcg_gen_gvec_add(MO_16, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vaddw(SHORTCODE) \
    tcg_gen_gvec_add(MO_32, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vaddb_dv(SHORTCODE) \
    tcg_gen_gvec_add(MO_8, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vaddh_dv(SHORTCODE) \
    tcg_gen_gvec_add(MO_16, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vaddw_dv(SHORTCODE) \
    tcg_gen_gvec_add(MO_32, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)
/* Vector sub - various forms */
#define fGEN_TCG_V6_vsubb(SHORTCODE) \
    tcg_gen_gvec_sub(MO_8, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vsubh(SHORTCODE) \
    tcg_gen_gvec_sub(MO_16, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vsubw(SHORTCODE) \
    tcg_gen_gvec_sub(MO_32, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vsubb_dv(SHORTCODE) \
    tcg_gen_gvec_sub(MO_8, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vsubh_dv(SHORTCODE) \
    tcg_gen_gvec_sub(MO_16, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vsubw_dv(SHORTCODE) \
    tcg_gen_gvec_sub(MO_32, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)
/* Vector shift right - various forms; shift amount is masked to lane width */
#define fGEN_TCG_V6_vasrh(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_sars(MO_16, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vasrh_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_sars(MO_16, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vasrw(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_sars(MO_32, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vasrw_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_sars(MO_32, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vlsrb(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 7); \
        tcg_gen_gvec_shrs(MO_8, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vlsrh(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_shrs(MO_16, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vlsrw(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_shrs(MO_32, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

/* Vector shift left - various forms */
#define fGEN_TCG_V6_vaslb(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 7); \
        tcg_gen_gvec_shls(MO_8, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vaslh(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_shls(MO_16, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vaslh_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_shls(MO_16, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vaslw(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_shls(MO_32, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vaslw_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_shls(MO_32, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
    } while (0)
/* Vector max - various forms */
#define fGEN_TCG_V6_vmaxw(SHORTCODE) \
    tcg_gen_gvec_smax(MO_32, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxh(SHORTCODE) \
    tcg_gen_gvec_smax(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxuh(SHORTCODE) \
    tcg_gen_gvec_umax(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxb(SHORTCODE) \
    tcg_gen_gvec_smax(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxub(SHORTCODE) \
    tcg_gen_gvec_umax(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
/* Vector min - various forms */
#define fGEN_TCG_V6_vminw(SHORTCODE) \
    tcg_gen_gvec_smin(MO_32, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminh(SHORTCODE) \
    tcg_gen_gvec_smin(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminuh(SHORTCODE) \
    tcg_gen_gvec_umin(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminb(SHORTCODE) \
    tcg_gen_gvec_smin(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminub(SHORTCODE) \
    tcg_gen_gvec_umin(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
/* Vector logical ops */
#define fGEN_TCG_V6_vxor(SHORTCODE) \
    tcg_gen_gvec_xor(MO_64, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vand(SHORTCODE) \
    tcg_gen_gvec_and(MO_64, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vor(SHORTCODE) \
    tcg_gen_gvec_or(MO_64, VdV_off, VuV_off, VvV_off, \
                    sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vnot(SHORTCODE) \
    tcg_gen_gvec_not(MO_64, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))
/* Q register logical ops */
#define fGEN_TCG_V6_pred_or(SHORTCODE) \
    tcg_gen_gvec_or(MO_64, QdV_off, QsV_off, QtV_off, \
                    sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_and(SHORTCODE) \
    tcg_gen_gvec_and(MO_64, QdV_off, QsV_off, QtV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_xor(SHORTCODE) \
    tcg_gen_gvec_xor(MO_64, QdV_off, QsV_off, QtV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_or_n(SHORTCODE) \
    tcg_gen_gvec_orc(MO_64, QdV_off, QsV_off, QtV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_and_n(SHORTCODE) \
    tcg_gen_gvec_andc(MO_64, QdV_off, QsV_off, QtV_off, \
                      sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_not(SHORTCODE) \
    tcg_gen_gvec_not(MO_64, QdV_off, QsV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))
/*
 * Vector compares: compare into the vtmp staging area, then compress the
 * per-lane results into the destination predicate (Q) register.
 */
#define fGEN_TCG_VEC_CMP(COND, TYPE, SIZE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        tcg_gen_gvec_cmp(COND, TYPE, tmpoff, VuV_off, VvV_off, \
                         sizeof(MMVector), sizeof(MMVector)); \
        vec_to_qvec(SIZE, QdV_off, tmpoff); \
    } while (0)

#define fGEN_TCG_V6_vgtw(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_32, 4)
#define fGEN_TCG_V6_vgth(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_16, 2)
#define fGEN_TCG_V6_vgtb(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_8, 1)

#define fGEN_TCG_V6_vgtuw(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_32, 4)
#define fGEN_TCG_V6_vgtuh(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_16, 2)
#define fGEN_TCG_V6_vgtub(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_8, 1)

#define fGEN_TCG_V6_veqw(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_32, 4)
#define fGEN_TCG_V6_veqh(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_16, 2)
#define fGEN_TCG_V6_veqb(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_8, 1)
/*
 * Vector compare combined with a logical op: compare into vtmp, compress
 * into qtmp, then fold into the accumulated predicate QxV with OP.
 */
#define fGEN_TCG_VEC_CMP_OP(COND, TYPE, SIZE, OP) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        intptr_t qoff = offsetof(CPUHexagonState, qtmp); \
        tcg_gen_gvec_cmp(COND, TYPE, tmpoff, VuV_off, VvV_off, \
                         sizeof(MMVector), sizeof(MMVector)); \
        vec_to_qvec(SIZE, qoff, tmpoff); \
        OP(MO_64, QxV_off, QxV_off, qoff, sizeof(MMQReg), sizeof(MMQReg)); \
    } while (0)

#define fGEN_TCG_V6_vgtw_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtw_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtw_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgtuw_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtuw_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtuw_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgth_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgth_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgth_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgtuh_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtuh_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtuh_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgtb_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtb_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtb_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgtub_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtub_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtub_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_veqw_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_and)
#define fGEN_TCG_V6_veqw_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_or)
#define fGEN_TCG_V6_veqw_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_veqh_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_and)
#define fGEN_TCG_V6_veqh_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_or)
#define fGEN_TCG_V6_veqh_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_veqb_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_and)
#define fGEN_TCG_V6_veqb_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_or)
#define fGEN_TCG_V6_veqb_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_xor)
/* Vector splat - various forms */
#define fGEN_TCG_V6_lvsplatw(SHORTCODE) \
    tcg_gen_gvec_dup_i32(MO_32, VdV_off, \
                         sizeof(MMVector), sizeof(MMVector), RtV)

#define fGEN_TCG_V6_lvsplath(SHORTCODE) \
    tcg_gen_gvec_dup_i32(MO_16, VdV_off, \
                         sizeof(MMVector), sizeof(MMVector), RtV)

#define fGEN_TCG_V6_lvsplatb(SHORTCODE) \
    tcg_gen_gvec_dup_i32(MO_8, VdV_off, \
                         sizeof(MMVector), sizeof(MMVector), RtV)
/* Vector absolute value - various forms */
#define fGEN_TCG_V6_vabsb(SHORTCODE) \
    tcg_gen_gvec_abs(MO_8, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vabsh(SHORTCODE) \
    tcg_gen_gvec_abs(MO_16, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vabsw(SHORTCODE) \
    tcg_gen_gvec_abs(MO_32, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))
/* Vector loads: the semantics shortcode is used directly */
#define fGEN_TCG_V6_vL32b_pi(SHORTCODE)                    SHORTCODE
#define fGEN_TCG_V6_vL32Ub_pi(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vL32b_cur_pi(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_tmp_pi(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_pi(SHORTCODE)                 SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_cur_pi(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_tmp_pi(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vL32b_ai(SHORTCODE)                    SHORTCODE
#define fGEN_TCG_V6_vL32Ub_ai(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vL32b_cur_ai(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_tmp_ai(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_ai(SHORTCODE)                 SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_cur_ai(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_tmp_ai(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vL32b_ppu(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vL32Ub_ppu(SHORTCODE)                  SHORTCODE
#define fGEN_TCG_V6_vL32b_cur_ppu(SHORTCODE)               SHORTCODE
#define fGEN_TCG_V6_vL32b_tmp_ppu(SHORTCODE)               SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_ppu(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_cur_ppu(SHORTCODE)            SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_tmp_ppu(SHORTCODE)            SHORTCODE
/*
 * Predicated vector loads: load when the predicate LSB is set, otherwise
 * cancel the slot.  GET_EA/PRED/INC are supplied by the per-addressing-mode
 * wrappers below.
 */
#define fGEN_TCG_PRED_VEC_LOAD(GET_EA, PRED, DSTOFF, INC) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        TCGLabel *false_label = gen_new_label(); \
        TCGLabel *end_label = gen_new_label(); \
        GET_EA; \
        PRED; \
        tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \
        gen_vreg_load(ctx, DSTOFF, EA, true); \
        INC; \
        tcg_gen_br(end_label); \
        gen_set_label(false_label); \
        tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \
                       1 << insn->slot); \
        gen_set_label(end_label); \
    } while (0)
/* Post-increment (pi) addressing variants */
#define fGEN_TCG_PRED_VEC_LOAD_pred_pi \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_I(RxV, siV * sizeof(MMVector)))
#define fGEN_TCG_PRED_VEC_LOAD_npred_pi \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_I(RxV, siV * sizeof(MMVector)))

#define fGEN_TCG_V6_vL32b_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_cur_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_cur_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_tmp_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_tmp_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_nt_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_nt_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_nt_cur_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_nt_cur_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_nt_tmp_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_nt_tmp_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
/* Absolute-immediate (ai) addressing variants; no pointer update */
#define fGEN_TCG_PRED_VEC_LOAD_pred_ai \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \
                           fEA_RI(RtV, siV * sizeof(MMVector)), \
                           VdV_off, \
                           do {} while (0))
#define fGEN_TCG_PRED_VEC_LOAD_npred_ai \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \
                           fEA_RI(RtV, siV * sizeof(MMVector)), \
                           VdV_off, \
                           do {} while (0))

#define fGEN_TCG_V6_vL32b_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_cur_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_cur_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_tmp_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_tmp_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_nt_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_nt_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_nt_cur_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_nt_cur_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_nt_tmp_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_nt_tmp_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
/* Register post-update (ppu) addressing variants */
#define fGEN_TCG_PRED_VEC_LOAD_pred_ppu \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_LOAD_npred_ppu \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_M(RxV, MuV))

#define fGEN_TCG_V6_vL32b_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_cur_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_cur_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_tmp_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_tmp_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_nt_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_nt_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_nt_cur_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_nt_cur_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_nt_tmp_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_nt_tmp_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
/* Vector stores: the semantics shortcode is used directly */
#define fGEN_TCG_V6_vS32b_pi(SHORTCODE)                    SHORTCODE
#define fGEN_TCG_V6_vS32Ub_pi(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_pi(SHORTCODE)                 SHORTCODE
#define fGEN_TCG_V6_vS32b_ai(SHORTCODE)                    SHORTCODE
#define fGEN_TCG_V6_vS32Ub_ai(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_ai(SHORTCODE)                 SHORTCODE
#define fGEN_TCG_V6_vS32b_ppu(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vS32Ub_ppu(SHORTCODE)                  SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_ppu(SHORTCODE)                SHORTCODE
/* New value vector stores: store the not-yet-committed OsN value */
#define fGEN_TCG_NEWVAL_VEC_STORE(GET_EA, INC) \
    do { \
        GET_EA; \
        gen_vreg_store(ctx, EA, OsN_off, insn->slot, true); \
        INC; \
    } while (0)

#define fGEN_TCG_NEWVAL_VEC_STORE_pi \
    fGEN_TCG_NEWVAL_VEC_STORE(fEA_REG(RxV), fPM_I(RxV, siV * sizeof(MMVector)))

#define fGEN_TCG_V6_vS32b_new_pi(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_pi
#define fGEN_TCG_V6_vS32b_nt_new_pi(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_pi

#define fGEN_TCG_NEWVAL_VEC_STORE_ai \
    fGEN_TCG_NEWVAL_VEC_STORE(fEA_RI(RtV, siV * sizeof(MMVector)), \
                              do {} while (0))

#define fGEN_TCG_V6_vS32b_new_ai(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ai
#define fGEN_TCG_V6_vS32b_nt_new_ai(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ai

#define fGEN_TCG_NEWVAL_VEC_STORE_ppu \
    fGEN_TCG_NEWVAL_VEC_STORE(fEA_REG(RxV), fPM_M(RxV, MuV))

#define fGEN_TCG_V6_vS32b_new_ppu(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ppu
#define fGEN_TCG_V6_vS32b_nt_new_ppu(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ppu
/*
 * Predicated vector stores: store when the predicate LSB is set, otherwise
 * cancel the slot.  GET_EA/PRED/INC are supplied by the wrappers below;
 * ALIGN selects aligned vs unaligned store handling.
 */
#define fGEN_TCG_PRED_VEC_STORE(GET_EA, PRED, SRCOFF, ALIGN, INC) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        TCGLabel *false_label = gen_new_label(); \
        TCGLabel *end_label = gen_new_label(); \
        GET_EA; \
        PRED; \
        tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \
        gen_vreg_store(ctx, EA, SRCOFF, insn->slot, ALIGN); \
        INC; \
        tcg_gen_br(end_label); \
        gen_set_label(false_label); \
        tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \
                       1 << insn->slot); \
        gen_set_label(end_label); \
    } while (0)
/* Post-increment (pi) addressing variants */
#define fGEN_TCG_PRED_VEC_STORE_pred_pi(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_I(RxV, siV * sizeof(MMVector)))
#define fGEN_TCG_PRED_VEC_STORE_npred_pi(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_I(RxV, siV * sizeof(MMVector)))
#define fGEN_TCG_PRED_VEC_STORE_new_pred_pi \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_I(RxV, siV * sizeof(MMVector)))
#define fGEN_TCG_PRED_VEC_STORE_new_npred_pi \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_I(RxV, siV * sizeof(MMVector)))

#define fGEN_TCG_V6_vS32b_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_pi(true)
#define fGEN_TCG_V6_vS32b_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_pi(true)
#define fGEN_TCG_V6_vS32Ub_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_pi(false)
#define fGEN_TCG_V6_vS32Ub_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_pi(false)
#define fGEN_TCG_V6_vS32b_nt_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_pi(true)
#define fGEN_TCG_V6_vS32b_nt_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_pi(true)
#define fGEN_TCG_V6_vS32b_new_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_pi
#define fGEN_TCG_V6_vS32b_new_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_pi
#define fGEN_TCG_V6_vS32b_nt_new_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_pi
#define fGEN_TCG_V6_vS32b_nt_new_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_pi
/* Absolute-immediate (ai) addressing variants; no pointer update */
#define fGEN_TCG_PRED_VEC_STORE_pred_ai(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            VsV_off, ALIGN, \
                            do {} while (0))
#define fGEN_TCG_PRED_VEC_STORE_npred_ai(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            VsV_off, ALIGN, \
                            do {} while (0))
#define fGEN_TCG_PRED_VEC_STORE_new_pred_ai \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            OsN_off, true, \
                            do {} while (0))
#define fGEN_TCG_PRED_VEC_STORE_new_npred_ai \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            OsN_off, true, \
                            do {} while (0))

#define fGEN_TCG_V6_vS32b_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ai(true)
#define fGEN_TCG_V6_vS32b_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ai(true)
#define fGEN_TCG_V6_vS32Ub_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ai(false)
#define fGEN_TCG_V6_vS32Ub_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ai(false)
#define fGEN_TCG_V6_vS32b_nt_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ai(true)
#define fGEN_TCG_V6_vS32b_nt_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ai(true)
#define fGEN_TCG_V6_vS32b_new_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ai
#define fGEN_TCG_V6_vS32b_new_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ai
#define fGEN_TCG_V6_vS32b_nt_new_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ai
#define fGEN_TCG_V6_vS32b_nt_new_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ai
/* Register post-update (ppu) addressing variants */
#define fGEN_TCG_PRED_VEC_STORE_pred_ppu(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_STORE_npred_ppu(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_STORE_new_pred_ppu \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_STORE_new_npred_ppu \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_M(RxV, MuV))

#define fGEN_TCG_V6_vS32b_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ppu(true)
#define fGEN_TCG_V6_vS32b_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ppu(true)
#define fGEN_TCG_V6_vS32Ub_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ppu(false)
#define fGEN_TCG_V6_vS32Ub_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ppu(false)
#define fGEN_TCG_V6_vS32b_nt_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ppu(true)
#define fGEN_TCG_V6_vS32b_nt_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ppu(true)
#define fGEN_TCG_V6_vS32b_new_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ppu
#define fGEN_TCG_V6_vS32b_new_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ppu
#define fGEN_TCG_V6_vS32b_nt_new_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ppu
#define fGEN_TCG_V6_vS32b_nt_new_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ppu
/* Masked vector stores: the semantics shortcode is used directly */
#define fGEN_TCG_V6_vS32b_qpred_pi(SHORTCODE)              SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_qpred_pi(SHORTCODE)           SHORTCODE
#define fGEN_TCG_V6_vS32b_qpred_ai(SHORTCODE)              SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_qpred_ai(SHORTCODE)           SHORTCODE
#define fGEN_TCG_V6_vS32b_qpred_ppu(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_qpred_ppu(SHORTCODE)          SHORTCODE
#define fGEN_TCG_V6_vS32b_nqpred_pi(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_nqpred_pi(SHORTCODE)          SHORTCODE
#define fGEN_TCG_V6_vS32b_nqpred_ai(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_nqpred_ai(SHORTCODE)          SHORTCODE
#define fGEN_TCG_V6_vS32b_nqpred_ppu(SHORTCODE)            SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_nqpred_ppu(SHORTCODE)         SHORTCODE
/* Store release not modelled in qemu, but need to suppress compiler warnings */
#define fGEN_TCG_V6_vS32b_srls_pi(SHORTCODE) \
    do { \
        siV = siV; \
    } while (0)
#define fGEN_TCG_V6_vS32b_srls_ai(SHORTCODE) \
    do { \
        siV = siV; \
    } while (0)
883 #define fGEN_TCG_V6_vS32b_srls_ppu(SHORTCODE) \