/*
 * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef HEXAGON_GEN_TCG_HVX_H
#define HEXAGON_GEN_TCG_HVX_H

/*
 * Histogram instructions
 *
 * Note that these instructions operate directly on the vector registers
 * and therefore happen after commit.
 *
 * The generate_<tag> function is called twice
 *     The first time is during the normal TCG generation
 *         ctx->pre_commit is true
 *         In the masked cases, we save the mask to the qtmp temporary
 *         Otherwise, there is nothing to do
 *     The second call is at the end of gen_commit_packet
 *         ctx->pre_commit is false
 *         Generate the call to the helper
 */

static inline void assert_vhist_tmp(DisasContext *ctx)
{
    /* vhist instructions require exactly one .tmp to be defined */
    g_assert(ctx->tmp_vregs_idx == 1);
}

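/*
 * For example, in the masked vhistq case below, the pre-commit pass copies
 * the mask QvV into the qtmp temporary, and the post-commit pass emits the
 * call to gen_helper_vhistq, which performs the actual histogram update.
 */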
#define fGEN_TCG_V6_vhist(SHORTCODE) \
    if (!ctx->pre_commit) { \
        assert_vhist_tmp(ctx); \
        gen_helper_vhist(cpu_env); \
    }
#define fGEN_TCG_V6_vhistq(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            assert_vhist_tmp(ctx); \
            gen_helper_vhistq(cpu_env); \
        } \
    } while (0)
#define fGEN_TCG_V6_vwhist256(SHORTCODE) \
    if (!ctx->pre_commit) { \
        assert_vhist_tmp(ctx); \
        gen_helper_vwhist256(cpu_env); \
    }
#define fGEN_TCG_V6_vwhist256q(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist256q(cpu_env); \
        } \
    } while (0)
#define fGEN_TCG_V6_vwhist256_sat(SHORTCODE) \
    if (!ctx->pre_commit) { \
        assert_vhist_tmp(ctx); \
        gen_helper_vwhist256_sat(cpu_env); \
    }
#define fGEN_TCG_V6_vwhist256q_sat(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist256q_sat(cpu_env); \
        } \
    } while (0)
#define fGEN_TCG_V6_vwhist128(SHORTCODE) \
    if (!ctx->pre_commit) { \
        assert_vhist_tmp(ctx); \
        gen_helper_vwhist128(cpu_env); \
    }
#define fGEN_TCG_V6_vwhist128q(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist128q(cpu_env); \
        } \
    } while (0)
#define fGEN_TCG_V6_vwhist128m(SHORTCODE) \
    if (!ctx->pre_commit) { \
        TCGv tcgv_uiV = tcg_constant_tl(uiV); \
        assert_vhist_tmp(ctx); \
        gen_helper_vwhist128m(cpu_env, tcgv_uiV); \
    }
#define fGEN_TCG_V6_vwhist128qm(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            TCGv tcgv_uiV = tcg_constant_tl(uiV); \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist128qm(cpu_env, tcgv_uiV); \
        } \
    } while (0)


#define fGEN_TCG_V6_vassign(SHORTCODE) \
    tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))

/* Vector conditional move */
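/*
 * The scalar predicate PsV supplies the condition: its LSB is compared with
 * PRED (1 for vcmov, 0 for vncmov). When the condition fails, the move is
 * skipped and the slot is flagged in hex_slot_cancelled.
 */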
#define fGEN_TCG_VEC_CMOV(PRED) \
    do { \
        TCGv lsb = tcg_temp_new(); \
        TCGLabel *false_label = gen_new_label(); \
        TCGLabel *end_label = gen_new_label(); \
        tcg_gen_andi_tl(lsb, PsV, 1); \
        tcg_gen_brcondi_tl(TCG_COND_NE, lsb, PRED, false_label); \
        tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \
                         sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_br(end_label); \
        gen_set_label(false_label); \
        tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \
                       1 << insn->slot); \
        gen_set_label(end_label); \
    } while (0)


/* Vector conditional move (true) */
#define fGEN_TCG_V6_vcmov(SHORTCODE) \
    fGEN_TCG_VEC_CMOV(1)

/* Vector conditional move (false) */
#define fGEN_TCG_V6_vncmov(SHORTCODE) \
    fGEN_TCG_VEC_CMOV(0)

/* Vector add - various forms */
#define fGEN_TCG_V6_vaddb(SHORTCODE) \
    tcg_gen_gvec_add(MO_8, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vaddh(SHORTCODE) \
    tcg_gen_gvec_add(MO_16, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vaddw(SHORTCODE) \
    tcg_gen_gvec_add(MO_32, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vaddb_dv(SHORTCODE) \
    tcg_gen_gvec_add(MO_8, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vaddh_dv(SHORTCODE) \
    tcg_gen_gvec_add(MO_16, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vaddw_dv(SHORTCODE) \
    tcg_gen_gvec_add(MO_32, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

/* Vector sub - various forms */
#define fGEN_TCG_V6_vsubb(SHORTCODE) \
    tcg_gen_gvec_sub(MO_8, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vsubh(SHORTCODE) \
    tcg_gen_gvec_sub(MO_16, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vsubw(SHORTCODE) \
    tcg_gen_gvec_sub(MO_32, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vsubb_dv(SHORTCODE) \
    tcg_gen_gvec_sub(MO_8, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vsubh_dv(SHORTCODE) \
    tcg_gen_gvec_sub(MO_16, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vsubw_dv(SHORTCODE) \
    tcg_gen_gvec_sub(MO_32, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

/* Vector shift right - various forms */
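/*
 * The shift amount from RtV is masked to the element width (7, 15, or 31
 * below) because the tcg_gen_gvec_{sars,shrs,shls} helpers require a shift
 * count smaller than the element size.
 */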
#define fGEN_TCG_V6_vasrh(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_sars(MO_16, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vasrh_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_sars(MO_16, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vasrw(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_sars(MO_32, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vasrw_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_sars(MO_32, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vlsrb(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 7); \
        tcg_gen_gvec_shrs(MO_8, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vlsrh(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_shrs(MO_16, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vlsrw(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_shrs(MO_32, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

/* Vector shift left - various forms */
#define fGEN_TCG_V6_vaslb(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 7); \
        tcg_gen_gvec_shls(MO_8, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vaslh(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_shls(MO_16, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vaslh_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_shls(MO_16, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vaslw(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_shls(MO_32, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vaslw_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_shls(MO_32, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

/* Vector max - various forms */
#define fGEN_TCG_V6_vmaxw(SHORTCODE) \
    tcg_gen_gvec_smax(MO_32, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxh(SHORTCODE) \
    tcg_gen_gvec_smax(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxuh(SHORTCODE) \
    tcg_gen_gvec_umax(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxb(SHORTCODE) \
    tcg_gen_gvec_smax(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxub(SHORTCODE) \
    tcg_gen_gvec_umax(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))

/* Vector min - various forms */
#define fGEN_TCG_V6_vminw(SHORTCODE) \
    tcg_gen_gvec_smin(MO_32, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminh(SHORTCODE) \
    tcg_gen_gvec_smin(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminuh(SHORTCODE) \
    tcg_gen_gvec_umin(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminb(SHORTCODE) \
    tcg_gen_gvec_smin(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminub(SHORTCODE) \
    tcg_gen_gvec_umin(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))

/* Vector logical ops */
#define fGEN_TCG_V6_vxor(SHORTCODE) \
    tcg_gen_gvec_xor(MO_64, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vand(SHORTCODE) \
    tcg_gen_gvec_and(MO_64, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vor(SHORTCODE) \
    tcg_gen_gvec_or(MO_64, VdV_off, VuV_off, VvV_off, \
                    sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vnot(SHORTCODE) \
    tcg_gen_gvec_not(MO_64, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))

/* Q register logical ops */
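/*
 * The "_n" variants complement the second source: pred_or_n computes
 * Qs | ~Qt (gvec orc) and pred_and_n computes Qs & ~Qt (gvec andc).
 */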
#define fGEN_TCG_V6_pred_or(SHORTCODE) \
    tcg_gen_gvec_or(MO_64, QdV_off, QsV_off, QtV_off, \
                    sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_and(SHORTCODE) \
    tcg_gen_gvec_and(MO_64, QdV_off, QsV_off, QtV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_xor(SHORTCODE) \
    tcg_gen_gvec_xor(MO_64, QdV_off, QsV_off, QtV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_or_n(SHORTCODE) \
    tcg_gen_gvec_orc(MO_64, QdV_off, QsV_off, QtV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_and_n(SHORTCODE) \
    tcg_gen_gvec_andc(MO_64, QdV_off, QsV_off, QtV_off, \
                      sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_not(SHORTCODE) \
    tcg_gen_gvec_not(MO_64, QdV_off, QsV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))

/* Vector compares */
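/*
 * tcg_gen_gvec_cmp leaves an all-ones/all-zeros result per element in the
 * vtmp temporary; vec_to_qvec then packs each element down to the
 * corresponding bits (one per byte) of the destination predicate register.
 */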
#define fGEN_TCG_VEC_CMP(COND, TYPE, SIZE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        tcg_gen_gvec_cmp(COND, TYPE, tmpoff, VuV_off, VvV_off, \
                         sizeof(MMVector), sizeof(MMVector)); \
        vec_to_qvec(SIZE, QdV_off, tmpoff); \
    } while (0)

#define fGEN_TCG_V6_vgtw(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_32, 4)
#define fGEN_TCG_V6_vgth(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_16, 2)
#define fGEN_TCG_V6_vgtb(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_8, 1)

#define fGEN_TCG_V6_vgtuw(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_32, 4)
#define fGEN_TCG_V6_vgtuh(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_16, 2)
#define fGEN_TCG_V6_vgtub(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_8, 1)

#define fGEN_TCG_V6_veqw(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_32, 4)
#define fGEN_TCG_V6_veqh(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_16, 2)
#define fGEN_TCG_V6_veqb(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_8, 1)

#define fGEN_TCG_VEC_CMP_OP(COND, TYPE, SIZE, OP) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        intptr_t qoff = offsetof(CPUHexagonState, qtmp); \
        tcg_gen_gvec_cmp(COND, TYPE, tmpoff, VuV_off, VvV_off, \
                         sizeof(MMVector), sizeof(MMVector)); \
        vec_to_qvec(SIZE, qoff, tmpoff); \
        OP(MO_64, QxV_off, QxV_off, qoff, sizeof(MMQReg), sizeof(MMQReg)); \
    } while (0)

#define fGEN_TCG_V6_vgtw_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtw_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtw_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgtuw_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtuw_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtuw_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgth_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgth_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgth_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgtuh_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtuh_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtuh_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgtb_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtb_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtb_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgtub_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtub_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtub_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_veqw_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_and)
#define fGEN_TCG_V6_veqw_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_or)
#define fGEN_TCG_V6_veqw_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_veqh_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_and)
#define fGEN_TCG_V6_veqh_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_or)
#define fGEN_TCG_V6_veqh_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_veqb_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_and)
#define fGEN_TCG_V6_veqb_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_or)
#define fGEN_TCG_V6_veqb_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_xor)

/* Vector splat - various forms */
#define fGEN_TCG_V6_lvsplatw(SHORTCODE) \
    tcg_gen_gvec_dup_i32(MO_32, VdV_off, \
                         sizeof(MMVector), sizeof(MMVector), RtV)

#define fGEN_TCG_V6_lvsplath(SHORTCODE) \
    tcg_gen_gvec_dup_i32(MO_16, VdV_off, \
                         sizeof(MMVector), sizeof(MMVector), RtV)

#define fGEN_TCG_V6_lvsplatb(SHORTCODE) \
    tcg_gen_gvec_dup_i32(MO_8, VdV_off, \
                         sizeof(MMVector), sizeof(MMVector), RtV)

/* Vector absolute value - various forms */
#define fGEN_TCG_V6_vabsb(SHORTCODE) \
    tcg_gen_gvec_abs(MO_8, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vabsh(SHORTCODE) \
    tcg_gen_gvec_abs(MO_16, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vabsw(SHORTCODE) \
    tcg_gen_gvec_abs(MO_32, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))

/* Vector loads */
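/*
 * For these, the semantics imported from the instruction definitions
 * (SHORTCODE) generate the load directly, so the macros expand to the
 * shortcode unchanged.
 */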
#define fGEN_TCG_V6_vL32b_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32Ub_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32b_cur_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32b_tmp_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_cur_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_tmp_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32b_ai(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32Ub_ai(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32b_cur_ai(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32b_tmp_ai(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_ai(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_cur_ai(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_tmp_ai(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32b_ppu(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32Ub_ppu(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32b_cur_ppu(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32b_tmp_ppu(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_ppu(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_cur_ppu(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_tmp_ppu(SHORTCODE) SHORTCODE

/* Predicated vector loads */
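/*
 * In the macro below, GET_EA computes the effective address into EA, PRED
 * leaves the (possibly negated) predicate LSB in LSB, DSTOFF is the
 * destination vector offset, and INC performs the addressing-mode update.
 * When the predicate is false, the load is skipped and the slot is marked
 * cancelled.
 */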
#define fGEN_TCG_PRED_VEC_LOAD(GET_EA, PRED, DSTOFF, INC) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        TCGLabel *false_label = gen_new_label(); \
        TCGLabel *end_label = gen_new_label(); \
        GET_EA; \
        PRED; \
        tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \
        gen_vreg_load(ctx, DSTOFF, EA, true); \
        INC; \
        tcg_gen_br(end_label); \
        gen_set_label(false_label); \
        tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \
                       1 << insn->slot); \
        gen_set_label(end_label); \
    } while (0)

#define fGEN_TCG_PRED_VEC_LOAD_pred_pi \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_I(RxV, siV * sizeof(MMVector)))
#define fGEN_TCG_PRED_VEC_LOAD_npred_pi \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_I(RxV, siV * sizeof(MMVector)))

#define fGEN_TCG_V6_vL32b_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_cur_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_cur_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_tmp_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_tmp_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_nt_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_nt_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_nt_cur_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_nt_cur_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_nt_tmp_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_nt_tmp_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi

#define fGEN_TCG_PRED_VEC_LOAD_pred_ai \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \
                           fEA_RI(RtV, siV * sizeof(MMVector)), \
                           VdV_off, \
                           do {} while (0))
#define fGEN_TCG_PRED_VEC_LOAD_npred_ai \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \
                           fEA_RI(RtV, siV * sizeof(MMVector)), \
                           VdV_off, \
                           do {} while (0))

#define fGEN_TCG_V6_vL32b_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_cur_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_cur_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_tmp_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_tmp_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_nt_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_nt_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_nt_cur_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_nt_cur_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_nt_tmp_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_nt_tmp_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai

#define fGEN_TCG_PRED_VEC_LOAD_pred_ppu \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_LOAD_npred_ppu \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_M(RxV, MuV))

#define fGEN_TCG_V6_vL32b_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_cur_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_cur_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_tmp_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_tmp_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_nt_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_nt_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_nt_cur_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_nt_cur_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_nt_tmp_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_nt_tmp_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu

/* Vector stores */
#define fGEN_TCG_V6_vS32b_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32Ub_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32b_ai(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32Ub_ai(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_ai(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32b_ppu(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32Ub_ppu(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_ppu(SHORTCODE) SHORTCODE

/* New value vector stores */
#define fGEN_TCG_NEWVAL_VEC_STORE(GET_EA, INC) \
    do { \
        GET_EA; \
        gen_vreg_store(ctx, EA, OsN_off, insn->slot, true); \
        INC; \
    } while (0)
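/*
 * OsN_off is the offset of the new-value source, i.e. the vector produced
 * by another instruction in the same packet.
 */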

#define fGEN_TCG_NEWVAL_VEC_STORE_pi \
    fGEN_TCG_NEWVAL_VEC_STORE(fEA_REG(RxV), fPM_I(RxV, siV * sizeof(MMVector)))

#define fGEN_TCG_V6_vS32b_new_pi(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_pi
#define fGEN_TCG_V6_vS32b_nt_new_pi(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_pi

#define fGEN_TCG_NEWVAL_VEC_STORE_ai \
    fGEN_TCG_NEWVAL_VEC_STORE(fEA_RI(RtV, siV * sizeof(MMVector)), \
                              do { } while (0))

#define fGEN_TCG_V6_vS32b_new_ai(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ai
#define fGEN_TCG_V6_vS32b_nt_new_ai(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ai

#define fGEN_TCG_NEWVAL_VEC_STORE_ppu \
    fGEN_TCG_NEWVAL_VEC_STORE(fEA_REG(RxV), fPM_M(RxV, MuV))

#define fGEN_TCG_V6_vS32b_new_ppu(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ppu
#define fGEN_TCG_V6_vS32b_nt_new_ppu(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ppu

/* Predicated vector stores */
#define fGEN_TCG_PRED_VEC_STORE(GET_EA, PRED, SRCOFF, ALIGN, INC) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        TCGLabel *false_label = gen_new_label(); \
        TCGLabel *end_label = gen_new_label(); \
        GET_EA; \
        PRED; \
        tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \
        gen_vreg_store(ctx, EA, SRCOFF, insn->slot, ALIGN); \
        INC; \
        tcg_gen_br(end_label); \
        gen_set_label(false_label); \
        tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \
                       1 << insn->slot); \
        gen_set_label(end_label); \
    } while (0)

#define fGEN_TCG_PRED_VEC_STORE_pred_pi(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_I(RxV, siV * sizeof(MMVector)))
#define fGEN_TCG_PRED_VEC_STORE_npred_pi(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_I(RxV, siV * sizeof(MMVector)))
#define fGEN_TCG_PRED_VEC_STORE_new_pred_pi \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_I(RxV, siV * sizeof(MMVector)))
#define fGEN_TCG_PRED_VEC_STORE_new_npred_pi \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_I(RxV, siV * sizeof(MMVector)))

#define fGEN_TCG_V6_vS32b_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_pi(true)
#define fGEN_TCG_V6_vS32b_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_pi(true)
#define fGEN_TCG_V6_vS32Ub_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_pi(false)
#define fGEN_TCG_V6_vS32Ub_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_pi(false)
#define fGEN_TCG_V6_vS32b_nt_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_pi(true)
#define fGEN_TCG_V6_vS32b_nt_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_pi(true)
#define fGEN_TCG_V6_vS32b_new_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_pi
#define fGEN_TCG_V6_vS32b_new_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_pi
#define fGEN_TCG_V6_vS32b_nt_new_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_pi
#define fGEN_TCG_V6_vS32b_nt_new_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_pi

#define fGEN_TCG_PRED_VEC_STORE_pred_ai(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            VsV_off, ALIGN, \
                            do { } while (0))
#define fGEN_TCG_PRED_VEC_STORE_npred_ai(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            VsV_off, ALIGN, \
                            do { } while (0))
#define fGEN_TCG_PRED_VEC_STORE_new_pred_ai \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            OsN_off, true, \
                            do { } while (0))
#define fGEN_TCG_PRED_VEC_STORE_new_npred_ai \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            OsN_off, true, \
                            do { } while (0))

#define fGEN_TCG_V6_vS32b_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ai(true)
#define fGEN_TCG_V6_vS32b_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ai(true)
#define fGEN_TCG_V6_vS32Ub_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ai(false)
#define fGEN_TCG_V6_vS32Ub_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ai(false)
#define fGEN_TCG_V6_vS32b_nt_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ai(true)
#define fGEN_TCG_V6_vS32b_nt_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ai(true)
#define fGEN_TCG_V6_vS32b_new_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ai
#define fGEN_TCG_V6_vS32b_new_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ai
#define fGEN_TCG_V6_vS32b_nt_new_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ai
#define fGEN_TCG_V6_vS32b_nt_new_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ai

#define fGEN_TCG_PRED_VEC_STORE_pred_ppu(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_STORE_npred_ppu(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_STORE_new_pred_ppu \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_STORE_new_npred_ppu \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_M(RxV, MuV))

#define fGEN_TCG_V6_vS32b_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ppu(true)
#define fGEN_TCG_V6_vS32b_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ppu(true)
#define fGEN_TCG_V6_vS32Ub_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ppu(false)
#define fGEN_TCG_V6_vS32Ub_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ppu(false)
#define fGEN_TCG_V6_vS32b_nt_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ppu(true)
#define fGEN_TCG_V6_vS32b_nt_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ppu(true)
#define fGEN_TCG_V6_vS32b_new_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ppu
#define fGEN_TCG_V6_vS32b_new_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ppu
#define fGEN_TCG_V6_vS32b_nt_new_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ppu
#define fGEN_TCG_V6_vS32b_nt_new_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ppu

/* Masked vector stores */
#define fGEN_TCG_V6_vS32b_qpred_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_qpred_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32b_qpred_ai(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_qpred_ai(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32b_qpred_ppu(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_qpred_ppu(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32b_nqpred_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_nqpred_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32b_nqpred_ai(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_nqpred_ai(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32b_nqpred_ppu(SHORTCODE) SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_nqpred_ppu(SHORTCODE) SHORTCODE

/* Store release not modelled in qemu, but need to suppress compiler warnings */
#define fGEN_TCG_V6_vS32b_srls_pi(SHORTCODE) \
    do { \
        siV = siV; \
    } while (0)
#define fGEN_TCG_V6_vS32b_srls_ai(SHORTCODE) \
    do { \
        RtV = RtV; \
        siV = siV; \
    } while (0)
#define fGEN_TCG_V6_vS32b_srls_ppu(SHORTCODE) \
    do { \
        MuV = MuV; \
    } while (0)

#endif