mirror_qemu.git: target/ppc/translate/vsx-impl.c.inc (blob at commit "target/ppc: Remove xscmpnedp instruction")
1 /*** VSX extension ***/
2
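/*
 * Access helpers for the 64-bit halves of a VSX register: 'high' selects
 * the most-significant doubleword of the 128-bit register 'n'.
 */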
3 static inline void get_cpu_vsr(TCGv_i64 dst, int n, bool high)
4 {
5 tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, high));
6 }
7
8 static inline void set_cpu_vsr(int n, TCGv_i64 src, bool high)
9 {
10 tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, high));
11 }
12
13 static inline TCGv_ptr gen_vsr_ptr(int reg)
14 {
15 TCGv_ptr r = tcg_temp_new_ptr();
16 tcg_gen_addi_ptr(r, cpu_env, vsr_full_offset(reg));
17 return r;
18 }
19
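/*
 * Scalar VSX loads: the loaded value is placed in the upper doubleword of
 * VSR[XT]; the lower doubleword is left unspecified (see the note below).
 */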
20 #define VSX_LOAD_SCALAR(name, operation) \
21 static void gen_##name(DisasContext *ctx) \
22 { \
23 TCGv EA; \
24 TCGv_i64 t0; \
25 if (unlikely(!ctx->vsx_enabled)) { \
26 gen_exception(ctx, POWERPC_EXCP_VSXU); \
27 return; \
28 } \
29 t0 = tcg_temp_new_i64(); \
30 gen_set_access_type(ctx, ACCESS_INT); \
31 EA = tcg_temp_new(); \
32 gen_addr_reg_index(ctx, EA); \
33 gen_qemu_##operation(ctx, t0, EA); \
34 set_cpu_vsr(xT(ctx->opcode), t0, true); \
35 /* NOTE: cpu_vsrl is undefined */ \
36 tcg_temp_free(EA); \
37 tcg_temp_free_i64(t0); \
38 }
39
40 VSX_LOAD_SCALAR(lxsdx, ld64_i64)
41 VSX_LOAD_SCALAR(lxsiwax, ld32s_i64)
42 VSX_LOAD_SCALAR(lxsibzx, ld8u_i64)
43 VSX_LOAD_SCALAR(lxsihzx, ld16u_i64)
44 VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64)
45 VSX_LOAD_SCALAR(lxsspx, ld32fs)
46
47 static void gen_lxvd2x(DisasContext *ctx)
48 {
49 TCGv EA;
50 TCGv_i64 t0;
51 if (unlikely(!ctx->vsx_enabled)) {
52 gen_exception(ctx, POWERPC_EXCP_VSXU);
53 return;
54 }
55 t0 = tcg_temp_new_i64();
56 gen_set_access_type(ctx, ACCESS_INT);
57 EA = tcg_temp_new();
58 gen_addr_reg_index(ctx, EA);
59 gen_qemu_ld64_i64(ctx, t0, EA);
60 set_cpu_vsr(xT(ctx->opcode), t0, true);
61 tcg_gen_addi_tl(EA, EA, 8);
62 gen_qemu_ld64_i64(ctx, t0, EA);
63 set_cpu_vsr(xT(ctx->opcode), t0, false);
64 tcg_temp_free(EA);
65 tcg_temp_free_i64(t0);
66 }
67
68 static void gen_lxvw4x(DisasContext *ctx)
69 {
70 TCGv EA;
71 TCGv_i64 xth;
72 TCGv_i64 xtl;
73 if (unlikely(!ctx->vsx_enabled)) {
74 gen_exception(ctx, POWERPC_EXCP_VSXU);
75 return;
76 }
77 xth = tcg_temp_new_i64();
78 xtl = tcg_temp_new_i64();
79
80 gen_set_access_type(ctx, ACCESS_INT);
81 EA = tcg_temp_new();
82
83 gen_addr_reg_index(ctx, EA);
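/*
 * In LE mode the MO_LEUQ load byte-reverses the whole doubleword, so the
 * two words are swapped back afterwards: element order is preserved while
 * each word stays byte-reversed, as lxvw4x requires in little-endian mode.
 */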
84 if (ctx->le_mode) {
85 TCGv_i64 t0 = tcg_temp_new_i64();
86 TCGv_i64 t1 = tcg_temp_new_i64();
87
88 tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ);
89 tcg_gen_shri_i64(t1, t0, 32);
90 tcg_gen_deposit_i64(xth, t1, t0, 32, 32);
91 tcg_gen_addi_tl(EA, EA, 8);
92 tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ);
93 tcg_gen_shri_i64(t1, t0, 32);
94 tcg_gen_deposit_i64(xtl, t1, t0, 32, 32);
95 tcg_temp_free_i64(t0);
96 tcg_temp_free_i64(t1);
97 } else {
98 tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
99 tcg_gen_addi_tl(EA, EA, 8);
100 tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
101 }
102 set_cpu_vsr(xT(ctx->opcode), xth, true);
103 set_cpu_vsr(xT(ctx->opcode), xtl, false);
104 tcg_temp_free(EA);
105 tcg_temp_free_i64(xth);
106 tcg_temp_free_i64(xtl);
107 }
108
109 static void gen_lxvwsx(DisasContext *ctx)
110 {
111 TCGv EA;
112 TCGv_i32 data;
113
114 if (xT(ctx->opcode) < 32) {
115 if (unlikely(!ctx->vsx_enabled)) {
116 gen_exception(ctx, POWERPC_EXCP_VSXU);
117 return;
118 }
119 } else {
120 if (unlikely(!ctx->altivec_enabled)) {
121 gen_exception(ctx, POWERPC_EXCP_VPU);
122 return;
123 }
124 }
125
126 gen_set_access_type(ctx, ACCESS_INT);
127 EA = tcg_temp_new();
128
129 gen_addr_reg_index(ctx, EA);
130
131 data = tcg_temp_new_i32();
132 tcg_gen_qemu_ld_i32(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UL));
133 tcg_gen_gvec_dup_i32(MO_UL, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
134
135 tcg_temp_free(EA);
136 tcg_temp_free_i32(data);
137 }
138
139 static void gen_lxvdsx(DisasContext *ctx)
140 {
141 TCGv EA;
142 TCGv_i64 data;
143
144 if (unlikely(!ctx->vsx_enabled)) {
145 gen_exception(ctx, POWERPC_EXCP_VSXU);
146 return;
147 }
148
149 gen_set_access_type(ctx, ACCESS_INT);
150 EA = tcg_temp_new();
151
152 gen_addr_reg_index(ctx, EA);
153
154 data = tcg_temp_new_i64();
155 tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UQ));
156 tcg_gen_gvec_dup_i64(MO_UQ, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
157
158 tcg_temp_free(EA);
159 tcg_temp_free_i64(data);
160 }
161
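/* Byte-swap each of the eight 16-bit elements held in the inh:inl pair. */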
162 static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl,
163 TCGv_i64 inh, TCGv_i64 inl)
164 {
165 TCGv_i64 mask = tcg_const_i64(0x00FF00FF00FF00FF);
166 TCGv_i64 t0 = tcg_temp_new_i64();
167 TCGv_i64 t1 = tcg_temp_new_i64();
168
169 /* outh = ((inh & mask) << 8) | ((inh >> 8) & mask) */
170 tcg_gen_and_i64(t0, inh, mask);
171 tcg_gen_shli_i64(t0, t0, 8);
172 tcg_gen_shri_i64(t1, inh, 8);
173 tcg_gen_and_i64(t1, t1, mask);
174 tcg_gen_or_i64(outh, t0, t1);
175
176 /* outl = ((inl & mask) << 8) | ((inl >> 8) & mask) */
177 tcg_gen_and_i64(t0, inl, mask);
178 tcg_gen_shli_i64(t0, t0, 8);
179 tcg_gen_shri_i64(t1, inl, 8);
180 tcg_gen_and_i64(t1, t1, mask);
181 tcg_gen_or_i64(outl, t0, t1);
182
183 tcg_temp_free_i64(t0);
184 tcg_temp_free_i64(t1);
185 tcg_temp_free_i64(mask);
186 }
187
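/* Byte-swap each of the four 32-bit elements held in the inh:inl pair. */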
188 static void gen_bswap32x4(TCGv_i64 outh, TCGv_i64 outl,
189 TCGv_i64 inh, TCGv_i64 inl)
190 {
191 TCGv_i64 hi = tcg_temp_new_i64();
192 TCGv_i64 lo = tcg_temp_new_i64();
193
194 tcg_gen_bswap64_i64(hi, inh);
195 tcg_gen_bswap64_i64(lo, inl);
196 tcg_gen_shri_i64(outh, hi, 32);
197 tcg_gen_deposit_i64(outh, outh, hi, 32, 32);
198 tcg_gen_shri_i64(outl, lo, 32);
199 tcg_gen_deposit_i64(outl, outl, lo, 32, 32);
200
201 tcg_temp_free_i64(hi);
202 tcg_temp_free_i64(lo);
203 }
204 static void gen_lxvh8x(DisasContext *ctx)
205 {
206 TCGv EA;
207 TCGv_i64 xth;
208 TCGv_i64 xtl;
209
210 if (unlikely(!ctx->vsx_enabled)) {
211 gen_exception(ctx, POWERPC_EXCP_VSXU);
212 return;
213 }
214 xth = tcg_temp_new_i64();
215 xtl = tcg_temp_new_i64();
216 gen_set_access_type(ctx, ACCESS_INT);
217
218 EA = tcg_temp_new();
219 gen_addr_reg_index(ctx, EA);
220 tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
221 tcg_gen_addi_tl(EA, EA, 8);
222 tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
223 if (ctx->le_mode) {
224 gen_bswap16x8(xth, xtl, xth, xtl);
225 }
226 set_cpu_vsr(xT(ctx->opcode), xth, true);
227 set_cpu_vsr(xT(ctx->opcode), xtl, false);
228 tcg_temp_free(EA);
229 tcg_temp_free_i64(xth);
230 tcg_temp_free_i64(xtl);
231 }
232
233 static void gen_lxvb16x(DisasContext *ctx)
234 {
235 TCGv EA;
236 TCGv_i64 xth;
237 TCGv_i64 xtl;
238
239 if (unlikely(!ctx->vsx_enabled)) {
240 gen_exception(ctx, POWERPC_EXCP_VSXU);
241 return;
242 }
243 xth = tcg_temp_new_i64();
244 xtl = tcg_temp_new_i64();
245 gen_set_access_type(ctx, ACCESS_INT);
246 EA = tcg_temp_new();
247 gen_addr_reg_index(ctx, EA);
248 tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
249 tcg_gen_addi_tl(EA, EA, 8);
250 tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
251 set_cpu_vsr(xT(ctx->opcode), xth, true);
252 set_cpu_vsr(xT(ctx->opcode), xtl, false);
253 tcg_temp_free(EA);
254 tcg_temp_free_i64(xth);
255 tcg_temp_free_i64(xtl);
256 }
257
258 #ifdef TARGET_PPC64
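/*
 * Load/store vector with length: the length operand in RB is passed to the
 * helper. XT >= 32 names a VR, so the AltiVec facility check applies to
 * that half of the register file.
 */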
259 #define VSX_VECTOR_LOAD_STORE_LENGTH(name) \
260 static void gen_##name(DisasContext *ctx) \
261 { \
262 TCGv EA; \
263 TCGv_ptr xt; \
264 \
265 if (xT(ctx->opcode) < 32) { \
266 if (unlikely(!ctx->vsx_enabled)) { \
267 gen_exception(ctx, POWERPC_EXCP_VSXU); \
268 return; \
269 } \
270 } else { \
271 if (unlikely(!ctx->altivec_enabled)) { \
272 gen_exception(ctx, POWERPC_EXCP_VPU); \
273 return; \
274 } \
275 } \
276 EA = tcg_temp_new(); \
277 xt = gen_vsr_ptr(xT(ctx->opcode)); \
278 gen_set_access_type(ctx, ACCESS_INT); \
279 gen_addr_register(ctx, EA); \
280 gen_helper_##name(cpu_env, EA, xt, cpu_gpr[rB(ctx->opcode)]); \
281 tcg_temp_free(EA); \
282 tcg_temp_free_ptr(xt); \
283 }
284
285 VSX_VECTOR_LOAD_STORE_LENGTH(lxvl)
286 VSX_VECTOR_LOAD_STORE_LENGTH(lxvll)
287 VSX_VECTOR_LOAD_STORE_LENGTH(stxvl)
288 VSX_VECTOR_LOAD_STORE_LENGTH(stxvll)
289 #endif
290
291 #define VSX_LOAD_SCALAR_DS(name, operation) \
292 static void gen_##name(DisasContext *ctx) \
293 { \
294 TCGv EA; \
295 TCGv_i64 xth; \
296 \
297 if (unlikely(!ctx->altivec_enabled)) { \
298 gen_exception(ctx, POWERPC_EXCP_VPU); \
299 return; \
300 } \
301 xth = tcg_temp_new_i64(); \
302 gen_set_access_type(ctx, ACCESS_INT); \
303 EA = tcg_temp_new(); \
304 gen_addr_imm_index(ctx, EA, 0x03); \
305 gen_qemu_##operation(ctx, xth, EA); \
306 set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); \
307 /* NOTE: cpu_vsrl is undefined */ \
308 tcg_temp_free(EA); \
309 tcg_temp_free_i64(xth); \
310 }
311
312 VSX_LOAD_SCALAR_DS(lxsd, ld64_i64)
313 VSX_LOAD_SCALAR_DS(lxssp, ld32fs)
314
315 #define VSX_STORE_SCALAR(name, operation) \
316 static void gen_##name(DisasContext *ctx) \
317 { \
318 TCGv EA; \
319 TCGv_i64 t0; \
320 if (unlikely(!ctx->vsx_enabled)) { \
321 gen_exception(ctx, POWERPC_EXCP_VSXU); \
322 return; \
323 } \
324 t0 = tcg_temp_new_i64(); \
325 gen_set_access_type(ctx, ACCESS_INT); \
326 EA = tcg_temp_new(); \
327 gen_addr_reg_index(ctx, EA); \
328 get_cpu_vsr(t0, xS(ctx->opcode), true); \
329 gen_qemu_##operation(ctx, t0, EA); \
330 tcg_temp_free(EA); \
331 tcg_temp_free_i64(t0); \
332 }
333
334 VSX_STORE_SCALAR(stxsdx, st64_i64)
335
336 VSX_STORE_SCALAR(stxsibx, st8_i64)
337 VSX_STORE_SCALAR(stxsihx, st16_i64)
338 VSX_STORE_SCALAR(stxsiwx, st32_i64)
339 VSX_STORE_SCALAR(stxsspx, st32fs)
340
341 static void gen_stxvd2x(DisasContext *ctx)
342 {
343 TCGv EA;
344 TCGv_i64 t0;
345 if (unlikely(!ctx->vsx_enabled)) {
346 gen_exception(ctx, POWERPC_EXCP_VSXU);
347 return;
348 }
349 t0 = tcg_temp_new_i64();
350 gen_set_access_type(ctx, ACCESS_INT);
351 EA = tcg_temp_new();
352 gen_addr_reg_index(ctx, EA);
353 get_cpu_vsr(t0, xS(ctx->opcode), true);
354 gen_qemu_st64_i64(ctx, t0, EA);
355 tcg_gen_addi_tl(EA, EA, 8);
356 get_cpu_vsr(t0, xS(ctx->opcode), false);
357 gen_qemu_st64_i64(ctx, t0, EA);
358 tcg_temp_free(EA);
359 tcg_temp_free_i64(t0);
360 }
361
362 static void gen_stxvw4x(DisasContext *ctx)
363 {
364 TCGv EA;
365 TCGv_i64 xsh;
366 TCGv_i64 xsl;
367
368 if (unlikely(!ctx->vsx_enabled)) {
369 gen_exception(ctx, POWERPC_EXCP_VSXU);
370 return;
371 }
372 xsh = tcg_temp_new_i64();
373 xsl = tcg_temp_new_i64();
374 get_cpu_vsr(xsh, xS(ctx->opcode), true);
375 get_cpu_vsr(xsl, xS(ctx->opcode), false);
376 gen_set_access_type(ctx, ACCESS_INT);
377 EA = tcg_temp_new();
378 gen_addr_reg_index(ctx, EA);
379 if (ctx->le_mode) {
380 TCGv_i64 t0 = tcg_temp_new_i64();
381 TCGv_i64 t1 = tcg_temp_new_i64();
382
383 tcg_gen_shri_i64(t0, xsh, 32);
384 tcg_gen_deposit_i64(t1, t0, xsh, 32, 32);
385 tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ);
386 tcg_gen_addi_tl(EA, EA, 8);
387 tcg_gen_shri_i64(t0, xsl, 32);
388 tcg_gen_deposit_i64(t1, t0, xsl, 32, 32);
389 tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ);
390 tcg_temp_free_i64(t0);
391 tcg_temp_free_i64(t1);
392 } else {
393 tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
394 tcg_gen_addi_tl(EA, EA, 8);
395 tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
396 }
397 tcg_temp_free(EA);
398 tcg_temp_free_i64(xsh);
399 tcg_temp_free_i64(xsl);
400 }
401
402 static void gen_stxvh8x(DisasContext *ctx)
403 {
404 TCGv EA;
405 TCGv_i64 xsh;
406 TCGv_i64 xsl;
407
408 if (unlikely(!ctx->vsx_enabled)) {
409 gen_exception(ctx, POWERPC_EXCP_VSXU);
410 return;
411 }
412 xsh = tcg_temp_new_i64();
413 xsl = tcg_temp_new_i64();
414 get_cpu_vsr(xsh, xS(ctx->opcode), true);
415 get_cpu_vsr(xsl, xS(ctx->opcode), false);
416 gen_set_access_type(ctx, ACCESS_INT);
417 EA = tcg_temp_new();
418 gen_addr_reg_index(ctx, EA);
419 if (ctx->le_mode) {
420 TCGv_i64 outh = tcg_temp_new_i64();
421 TCGv_i64 outl = tcg_temp_new_i64();
422
423 gen_bswap16x8(outh, outl, xsh, xsl);
424 tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEUQ);
425 tcg_gen_addi_tl(EA, EA, 8);
426 tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEUQ);
427 tcg_temp_free_i64(outh);
428 tcg_temp_free_i64(outl);
429 } else {
430 tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
431 tcg_gen_addi_tl(EA, EA, 8);
432 tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
433 }
434 tcg_temp_free(EA);
435 tcg_temp_free_i64(xsh);
436 tcg_temp_free_i64(xsl);
437 }
438
439 static void gen_stxvb16x(DisasContext *ctx)
440 {
441 TCGv EA;
442 TCGv_i64 xsh;
443 TCGv_i64 xsl;
444
445 if (unlikely(!ctx->vsx_enabled)) {
446 gen_exception(ctx, POWERPC_EXCP_VSXU);
447 return;
448 }
449 xsh = tcg_temp_new_i64();
450 xsl = tcg_temp_new_i64();
451 get_cpu_vsr(xsh, xS(ctx->opcode), true);
452 get_cpu_vsr(xsl, xS(ctx->opcode), false);
453 gen_set_access_type(ctx, ACCESS_INT);
454 EA = tcg_temp_new();
455 gen_addr_reg_index(ctx, EA);
456 tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
457 tcg_gen_addi_tl(EA, EA, 8);
458 tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
459 tcg_temp_free(EA);
460 tcg_temp_free_i64(xsh);
461 tcg_temp_free_i64(xsl);
462 }
463
464 #define VSX_STORE_SCALAR_DS(name, operation) \
465 static void gen_##name(DisasContext *ctx) \
466 { \
467 TCGv EA; \
468 TCGv_i64 xth; \
469 \
470 if (unlikely(!ctx->altivec_enabled)) { \
471 gen_exception(ctx, POWERPC_EXCP_VPU); \
472 return; \
473 } \
474 xth = tcg_temp_new_i64(); \
475 get_cpu_vsr(xth, rD(ctx->opcode) + 32, true); \
476 gen_set_access_type(ctx, ACCESS_INT); \
477 EA = tcg_temp_new(); \
478 gen_addr_imm_index(ctx, EA, 0x03); \
479 gen_qemu_##operation(ctx, xth, EA); \
480 /* NOTE: cpu_vsrl is undefined */ \
481 tcg_temp_free(EA); \
482 tcg_temp_free_i64(xth); \
483 }
484
485 VSX_STORE_SCALAR_DS(stxsd, st64_i64)
486 VSX_STORE_SCALAR_DS(stxssp, st32fs)
487
488 static void gen_mfvsrwz(DisasContext *ctx)
489 {
490 if (xS(ctx->opcode) < 32) {
491 if (unlikely(!ctx->fpu_enabled)) {
492 gen_exception(ctx, POWERPC_EXCP_FPU);
493 return;
494 }
495 } else {
496 if (unlikely(!ctx->altivec_enabled)) {
497 gen_exception(ctx, POWERPC_EXCP_VPU);
498 return;
499 }
500 }
501 TCGv_i64 tmp = tcg_temp_new_i64();
502 TCGv_i64 xsh = tcg_temp_new_i64();
503 get_cpu_vsr(xsh, xS(ctx->opcode), true);
504 tcg_gen_ext32u_i64(tmp, xsh);
505 tcg_gen_trunc_i64_tl(cpu_gpr[rA(ctx->opcode)], tmp);
506 tcg_temp_free_i64(tmp);
507 tcg_temp_free_i64(xsh);
508 }
509
510 static void gen_mtvsrwa(DisasContext *ctx)
511 {
512 if (xS(ctx->opcode) < 32) {
513 if (unlikely(!ctx->fpu_enabled)) {
514 gen_exception(ctx, POWERPC_EXCP_FPU);
515 return;
516 }
517 } else {
518 if (unlikely(!ctx->altivec_enabled)) {
519 gen_exception(ctx, POWERPC_EXCP_VPU);
520 return;
521 }
522 }
523 TCGv_i64 tmp = tcg_temp_new_i64();
524 TCGv_i64 xsh = tcg_temp_new_i64();
525 tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]);
526 tcg_gen_ext32s_i64(xsh, tmp);
527 set_cpu_vsr(xT(ctx->opcode), xsh, true);
528 tcg_temp_free_i64(tmp);
529 tcg_temp_free_i64(xsh);
530 }
531
532 static void gen_mtvsrwz(DisasContext *ctx)
533 {
534 if (xS(ctx->opcode) < 32) {
535 if (unlikely(!ctx->fpu_enabled)) {
536 gen_exception(ctx, POWERPC_EXCP_FPU);
537 return;
538 }
539 } else {
540 if (unlikely(!ctx->altivec_enabled)) {
541 gen_exception(ctx, POWERPC_EXCP_VPU);
542 return;
543 }
544 }
545 TCGv_i64 tmp = tcg_temp_new_i64();
546 TCGv_i64 xsh = tcg_temp_new_i64();
547 tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]);
548 tcg_gen_ext32u_i64(xsh, tmp);
549 set_cpu_vsr(xT(ctx->opcode), xsh, true);
550 tcg_temp_free_i64(tmp);
551 tcg_temp_free_i64(xsh);
552 }
553
554 #if defined(TARGET_PPC64)
555 static void gen_mfvsrd(DisasContext *ctx)
556 {
557 TCGv_i64 t0;
558 if (xS(ctx->opcode) < 32) {
559 if (unlikely(!ctx->fpu_enabled)) {
560 gen_exception(ctx, POWERPC_EXCP_FPU);
561 return;
562 }
563 } else {
564 if (unlikely(!ctx->altivec_enabled)) {
565 gen_exception(ctx, POWERPC_EXCP_VPU);
566 return;
567 }
568 }
569 t0 = tcg_temp_new_i64();
570 get_cpu_vsr(t0, xS(ctx->opcode), true);
571 tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0);
572 tcg_temp_free_i64(t0);
573 }
574
575 static void gen_mtvsrd(DisasContext *ctx)
576 {
577 TCGv_i64 t0;
578 if (xS(ctx->opcode) < 32) {
579 if (unlikely(!ctx->fpu_enabled)) {
580 gen_exception(ctx, POWERPC_EXCP_FPU);
581 return;
582 }
583 } else {
584 if (unlikely(!ctx->altivec_enabled)) {
585 gen_exception(ctx, POWERPC_EXCP_VPU);
586 return;
587 }
588 }
589 t0 = tcg_temp_new_i64();
590 tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]);
591 set_cpu_vsr(xT(ctx->opcode), t0, true);
592 tcg_temp_free_i64(t0);
593 }
594
595 static void gen_mfvsrld(DisasContext *ctx)
596 {
597 TCGv_i64 t0;
598 if (xS(ctx->opcode) < 32) {
599 if (unlikely(!ctx->vsx_enabled)) {
600 gen_exception(ctx, POWERPC_EXCP_VSXU);
601 return;
602 }
603 } else {
604 if (unlikely(!ctx->altivec_enabled)) {
605 gen_exception(ctx, POWERPC_EXCP_VPU);
606 return;
607 }
608 }
609 t0 = tcg_temp_new_i64();
610 get_cpu_vsr(t0, xS(ctx->opcode), false);
611 tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0);
612 tcg_temp_free_i64(t0);
613 }
614
615 static void gen_mtvsrdd(DisasContext *ctx)
616 {
617 TCGv_i64 t0;
618 if (xT(ctx->opcode) < 32) {
619 if (unlikely(!ctx->vsx_enabled)) {
620 gen_exception(ctx, POWERPC_EXCP_VSXU);
621 return;
622 }
623 } else {
624 if (unlikely(!ctx->altivec_enabled)) {
625 gen_exception(ctx, POWERPC_EXCP_VPU);
626 return;
627 }
628 }
629
630 t0 = tcg_temp_new_i64();
631 if (!rA(ctx->opcode)) {
632 tcg_gen_movi_i64(t0, 0);
633 } else {
634 tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]);
635 }
636 set_cpu_vsr(xT(ctx->opcode), t0, true);
637
638 tcg_gen_mov_i64(t0, cpu_gpr[rB(ctx->opcode)]);
639 set_cpu_vsr(xT(ctx->opcode), t0, false);
640 tcg_temp_free_i64(t0);
641 }
642
643 static void gen_mtvsrws(DisasContext *ctx)
644 {
645 TCGv_i64 t0;
646 if (xT(ctx->opcode) < 32) {
647 if (unlikely(!ctx->vsx_enabled)) {
648 gen_exception(ctx, POWERPC_EXCP_VSXU);
649 return;
650 }
651 } else {
652 if (unlikely(!ctx->altivec_enabled)) {
653 gen_exception(ctx, POWERPC_EXCP_VPU);
654 return;
655 }
656 }
657
658 t0 = tcg_temp_new_i64();
659 tcg_gen_deposit_i64(t0, cpu_gpr[rA(ctx->opcode)],
660 cpu_gpr[rA(ctx->opcode)], 32, 32);
661 set_cpu_vsr(xT(ctx->opcode), t0, false);
662 set_cpu_vsr(xT(ctx->opcode), t0, true);
663 tcg_temp_free_i64(t0);
664 }
665
666 #endif
667
668 #define OP_ABS 1
669 #define OP_NABS 2
670 #define OP_NEG 3
671 #define OP_CPSGN 4
672 #define SGN_MASK_DP 0x8000000000000000ull
673 #define SGN_MASK_SP 0x8000000080000000ull
674
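/*
 * Scalar sign-bit operations (abs, nabs, neg, copy-sign): the sign bit
 * selected by sgn_mask is cleared, set, flipped or copied in the upper
 * doubleword, and the lower doubleword of the target is zeroed.
 */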
675 #define VSX_SCALAR_MOVE(name, op, sgn_mask) \
676 static void glue(gen_, name)(DisasContext *ctx) \
677 { \
678 TCGv_i64 xb, sgm; \
679 if (unlikely(!ctx->vsx_enabled)) { \
680 gen_exception(ctx, POWERPC_EXCP_VSXU); \
681 return; \
682 } \
683 xb = tcg_temp_new_i64(); \
684 sgm = tcg_temp_new_i64(); \
685 get_cpu_vsr(xb, xB(ctx->opcode), true); \
686 tcg_gen_movi_i64(sgm, sgn_mask); \
687 switch (op) { \
688 case OP_ABS: { \
689 tcg_gen_andc_i64(xb, xb, sgm); \
690 break; \
691 } \
692 case OP_NABS: { \
693 tcg_gen_or_i64(xb, xb, sgm); \
694 break; \
695 } \
696 case OP_NEG: { \
697 tcg_gen_xor_i64(xb, xb, sgm); \
698 break; \
699 } \
700 case OP_CPSGN: { \
701 TCGv_i64 xa = tcg_temp_new_i64(); \
702 get_cpu_vsr(xa, xA(ctx->opcode), true); \
703 tcg_gen_and_i64(xa, xa, sgm); \
704 tcg_gen_andc_i64(xb, xb, sgm); \
705 tcg_gen_or_i64(xb, xb, xa); \
706 tcg_temp_free_i64(xa); \
707 break; \
708 } \
709 } \
710 set_cpu_vsr(xT(ctx->opcode), xb, true); \
711 set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \
712 tcg_temp_free_i64(xb); \
713 tcg_temp_free_i64(sgm); \
714 }
715
716 VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP)
717 VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP)
718 VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP)
719 VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP)
720
721 #define VSX_SCALAR_MOVE_QP(name, op, sgn_mask) \
722 static void glue(gen_, name)(DisasContext *ctx) \
723 { \
724 int xa; \
725 int xt = rD(ctx->opcode) + 32; \
726 int xb = rB(ctx->opcode) + 32; \
727 TCGv_i64 xah, xbh, xbl, sgm, tmp; \
728 \
729 if (unlikely(!ctx->vsx_enabled)) { \
730 gen_exception(ctx, POWERPC_EXCP_VSXU); \
731 return; \
732 } \
733 xbh = tcg_temp_new_i64(); \
734 xbl = tcg_temp_new_i64(); \
735 sgm = tcg_temp_new_i64(); \
736 tmp = tcg_temp_new_i64(); \
737 get_cpu_vsr(xbh, xb, true); \
738 get_cpu_vsr(xbl, xb, false); \
739 tcg_gen_movi_i64(sgm, sgn_mask); \
740 switch (op) { \
741 case OP_ABS: \
742 tcg_gen_andc_i64(xbh, xbh, sgm); \
743 break; \
744 case OP_NABS: \
745 tcg_gen_or_i64(xbh, xbh, sgm); \
746 break; \
747 case OP_NEG: \
748 tcg_gen_xor_i64(xbh, xbh, sgm); \
749 break; \
750 case OP_CPSGN: \
751 xah = tcg_temp_new_i64(); \
752 xa = rA(ctx->opcode) + 32; \
753 get_cpu_vsr(tmp, xa, true); \
754 tcg_gen_and_i64(xah, tmp, sgm); \
755 tcg_gen_andc_i64(xbh, xbh, sgm); \
756 tcg_gen_or_i64(xbh, xbh, xah); \
757 tcg_temp_free_i64(xah); \
758 break; \
759 } \
760 set_cpu_vsr(xt, xbh, true); \
761 set_cpu_vsr(xt, xbl, false); \
762 tcg_temp_free_i64(xbl); \
763 tcg_temp_free_i64(xbh); \
764 tcg_temp_free_i64(sgm); \
765 tcg_temp_free_i64(tmp); \
766 }
767
768 VSX_SCALAR_MOVE_QP(xsabsqp, OP_ABS, SGN_MASK_DP)
769 VSX_SCALAR_MOVE_QP(xsnabsqp, OP_NABS, SGN_MASK_DP)
770 VSX_SCALAR_MOVE_QP(xsnegqp, OP_NEG, SGN_MASK_DP)
771 VSX_SCALAR_MOVE_QP(xscpsgnqp, OP_CPSGN, SGN_MASK_DP)
772
773 #define VSX_VECTOR_MOVE(name, op, sgn_mask) \
774 static void glue(gen_, name)(DisasContext *ctx) \
775 { \
776 TCGv_i64 xbh, xbl, sgm; \
777 if (unlikely(!ctx->vsx_enabled)) { \
778 gen_exception(ctx, POWERPC_EXCP_VSXU); \
779 return; \
780 } \
781 xbh = tcg_temp_new_i64(); \
782 xbl = tcg_temp_new_i64(); \
783 sgm = tcg_temp_new_i64(); \
784 get_cpu_vsr(xbh, xB(ctx->opcode), true); \
785 get_cpu_vsr(xbl, xB(ctx->opcode), false); \
786 tcg_gen_movi_i64(sgm, sgn_mask); \
787 switch (op) { \
788 case OP_ABS: { \
789 tcg_gen_andc_i64(xbh, xbh, sgm); \
790 tcg_gen_andc_i64(xbl, xbl, sgm); \
791 break; \
792 } \
793 case OP_NABS: { \
794 tcg_gen_or_i64(xbh, xbh, sgm); \
795 tcg_gen_or_i64(xbl, xbl, sgm); \
796 break; \
797 } \
798 case OP_NEG: { \
799 tcg_gen_xor_i64(xbh, xbh, sgm); \
800 tcg_gen_xor_i64(xbl, xbl, sgm); \
801 break; \
802 } \
803 case OP_CPSGN: { \
804 TCGv_i64 xah = tcg_temp_new_i64(); \
805 TCGv_i64 xal = tcg_temp_new_i64(); \
806 get_cpu_vsr(xah, xA(ctx->opcode), true); \
807 get_cpu_vsr(xal, xA(ctx->opcode), false); \
808 tcg_gen_and_i64(xah, xah, sgm); \
809 tcg_gen_and_i64(xal, xal, sgm); \
810 tcg_gen_andc_i64(xbh, xbh, sgm); \
811 tcg_gen_andc_i64(xbl, xbl, sgm); \
812 tcg_gen_or_i64(xbh, xbh, xah); \
813 tcg_gen_or_i64(xbl, xbl, xal); \
814 tcg_temp_free_i64(xah); \
815 tcg_temp_free_i64(xal); \
816 break; \
817 } \
818 } \
819 set_cpu_vsr(xT(ctx->opcode), xbh, true); \
820 set_cpu_vsr(xT(ctx->opcode), xbl, false); \
821 tcg_temp_free_i64(xbh); \
822 tcg_temp_free_i64(xbl); \
823 tcg_temp_free_i64(sgm); \
824 }
825
826 VSX_VECTOR_MOVE(xvabsdp, OP_ABS, SGN_MASK_DP)
827 VSX_VECTOR_MOVE(xvnabsdp, OP_NABS, SGN_MASK_DP)
828 VSX_VECTOR_MOVE(xvnegdp, OP_NEG, SGN_MASK_DP)
829 VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP)
830 VSX_VECTOR_MOVE(xvabssp, OP_ABS, SGN_MASK_SP)
831 VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP)
832 VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP)
833 VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP)
834
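/*
 * Vector float compares: when the Rc bit (bit 21) of the instruction is
 * set, the helper's summary result is written to CR6; otherwise it is
 * discarded.
 */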
835 #define VSX_CMP(name, op1, op2, inval, type) \
836 static void gen_##name(DisasContext *ctx) \
837 { \
838 TCGv_i32 ignored; \
839 TCGv_ptr xt, xa, xb; \
840 if (unlikely(!ctx->vsx_enabled)) { \
841 gen_exception(ctx, POWERPC_EXCP_VSXU); \
842 return; \
843 } \
844 xt = gen_vsr_ptr(xT(ctx->opcode)); \
845 xa = gen_vsr_ptr(xA(ctx->opcode)); \
846 xb = gen_vsr_ptr(xB(ctx->opcode)); \
847 if ((ctx->opcode >> (31 - 21)) & 1) { \
848 gen_helper_##name(cpu_crf[6], cpu_env, xt, xa, xb); \
849 } else { \
850 ignored = tcg_temp_new_i32(); \
851 gen_helper_##name(ignored, cpu_env, xt, xa, xb); \
852 tcg_temp_free_i32(ignored); \
853 } \
854 gen_helper_float_check_status(cpu_env); \
855 tcg_temp_free_ptr(xt); \
856 tcg_temp_free_ptr(xa); \
857 tcg_temp_free_ptr(xb); \
858 }
859
860 VSX_CMP(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX)
861 VSX_CMP(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX)
862 VSX_CMP(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX)
863 VSX_CMP(xvcmpnedp, 0x0C, 0x0F, 0, PPC2_ISA300)
864 VSX_CMP(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX)
865 VSX_CMP(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX)
866 VSX_CMP(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX)
867 VSX_CMP(xvcmpnesp, 0x0C, 0x0B, 0, PPC2_VSX)
868
869 static bool trans_XSCVQPDP(DisasContext *ctx, arg_X_tb_rc *a)
870 {
871 TCGv_i32 ro;
872 TCGv_ptr xt, xb;
873
874 REQUIRE_INSNS_FLAGS2(ctx, ISA300);
875 REQUIRE_VSX(ctx);
876
877 ro = tcg_const_i32(a->rc);
878
879 xt = gen_avr_ptr(a->rt);
880 xb = gen_avr_ptr(a->rb);
881 gen_helper_XSCVQPDP(cpu_env, ro, xt, xb);
882 tcg_temp_free_i32(ro);
883 tcg_temp_free_ptr(xt);
884 tcg_temp_free_ptr(xb);
885
886 return true;
887 }
888
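/*
 * Boilerplate generators that call out-of-line helpers. The X variants
 * take VSR operands from the XX-form fields; the R variants address the
 * upper half of the register file (rD/rA/rB + 32), as used by the
 * quad-precision scalar instructions.
 */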
889 #define GEN_VSX_HELPER_2(name, op1, op2, inval, type) \
890 static void gen_##name(DisasContext *ctx) \
891 { \
892 TCGv_i32 opc; \
893 if (unlikely(!ctx->vsx_enabled)) { \
894 gen_exception(ctx, POWERPC_EXCP_VSXU); \
895 return; \
896 } \
897 opc = tcg_const_i32(ctx->opcode); \
898 gen_helper_##name(cpu_env, opc); \
899 tcg_temp_free_i32(opc); \
900 }
901
902 #define GEN_VSX_HELPER_X3(name, op1, op2, inval, type) \
903 static void gen_##name(DisasContext *ctx) \
904 { \
905 TCGv_ptr xt, xa, xb; \
906 if (unlikely(!ctx->vsx_enabled)) { \
907 gen_exception(ctx, POWERPC_EXCP_VSXU); \
908 return; \
909 } \
910 xt = gen_vsr_ptr(xT(ctx->opcode)); \
911 xa = gen_vsr_ptr(xA(ctx->opcode)); \
912 xb = gen_vsr_ptr(xB(ctx->opcode)); \
913 gen_helper_##name(cpu_env, xt, xa, xb); \
914 tcg_temp_free_ptr(xt); \
915 tcg_temp_free_ptr(xa); \
916 tcg_temp_free_ptr(xb); \
917 }
918
919 #define GEN_VSX_HELPER_X2(name, op1, op2, inval, type) \
920 static void gen_##name(DisasContext *ctx) \
921 { \
922 TCGv_ptr xt, xb; \
923 if (unlikely(!ctx->vsx_enabled)) { \
924 gen_exception(ctx, POWERPC_EXCP_VSXU); \
925 return; \
926 } \
927 xt = gen_vsr_ptr(xT(ctx->opcode)); \
928 xb = gen_vsr_ptr(xB(ctx->opcode)); \
929 gen_helper_##name(cpu_env, xt, xb); \
930 tcg_temp_free_ptr(xt); \
931 tcg_temp_free_ptr(xb); \
932 }
933
934 #define GEN_VSX_HELPER_X2_AB(name, op1, op2, inval, type) \
935 static void gen_##name(DisasContext *ctx) \
936 { \
937 TCGv_i32 opc; \
938 TCGv_ptr xa, xb; \
939 if (unlikely(!ctx->vsx_enabled)) { \
940 gen_exception(ctx, POWERPC_EXCP_VSXU); \
941 return; \
942 } \
943 opc = tcg_const_i32(ctx->opcode); \
944 xa = gen_vsr_ptr(xA(ctx->opcode)); \
945 xb = gen_vsr_ptr(xB(ctx->opcode)); \
946 gen_helper_##name(cpu_env, opc, xa, xb); \
947 tcg_temp_free_i32(opc); \
948 tcg_temp_free_ptr(xa); \
949 tcg_temp_free_ptr(xb); \
950 }
951
952 #define GEN_VSX_HELPER_X1(name, op1, op2, inval, type) \
953 static void gen_##name(DisasContext *ctx) \
954 { \
955 TCGv_i32 opc; \
956 TCGv_ptr xb; \
957 if (unlikely(!ctx->vsx_enabled)) { \
958 gen_exception(ctx, POWERPC_EXCP_VSXU); \
959 return; \
960 } \
961 opc = tcg_const_i32(ctx->opcode); \
962 xb = gen_vsr_ptr(xB(ctx->opcode)); \
963 gen_helper_##name(cpu_env, opc, xb); \
964 tcg_temp_free_i32(opc); \
965 tcg_temp_free_ptr(xb); \
966 }
967
968 #define GEN_VSX_HELPER_R3(name, op1, op2, inval, type) \
969 static void gen_##name(DisasContext *ctx) \
970 { \
971 TCGv_i32 opc; \
972 TCGv_ptr xt, xa, xb; \
973 if (unlikely(!ctx->vsx_enabled)) { \
974 gen_exception(ctx, POWERPC_EXCP_VSXU); \
975 return; \
976 } \
977 opc = tcg_const_i32(ctx->opcode); \
978 xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \
979 xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \
980 xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
981 gen_helper_##name(cpu_env, opc, xt, xa, xb); \
982 tcg_temp_free_i32(opc); \
983 tcg_temp_free_ptr(xt); \
984 tcg_temp_free_ptr(xa); \
985 tcg_temp_free_ptr(xb); \
986 }
987
988 #define GEN_VSX_HELPER_R2(name, op1, op2, inval, type) \
989 static void gen_##name(DisasContext *ctx) \
990 { \
991 TCGv_i32 opc; \
992 TCGv_ptr xt, xb; \
993 if (unlikely(!ctx->vsx_enabled)) { \
994 gen_exception(ctx, POWERPC_EXCP_VSXU); \
995 return; \
996 } \
997 opc = tcg_const_i32(ctx->opcode); \
998 xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \
999 xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
1000 gen_helper_##name(cpu_env, opc, xt, xb); \
1001 tcg_temp_free_i32(opc); \
1002 tcg_temp_free_ptr(xt); \
1003 tcg_temp_free_ptr(xb); \
1004 }
1005
1006 #define GEN_VSX_HELPER_R2_AB(name, op1, op2, inval, type) \
1007 static void gen_##name(DisasContext *ctx) \
1008 { \
1009 TCGv_i32 opc; \
1010 TCGv_ptr xa, xb; \
1011 if (unlikely(!ctx->vsx_enabled)) { \
1012 gen_exception(ctx, POWERPC_EXCP_VSXU); \
1013 return; \
1014 } \
1015 opc = tcg_const_i32(ctx->opcode); \
1016 xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \
1017 xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
1018 gen_helper_##name(cpu_env, opc, xa, xb); \
1019 tcg_temp_free_i32(opc); \
1020 tcg_temp_free_ptr(xa); \
1021 tcg_temp_free_ptr(xb); \
1022 }
1023
1024 #define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
1025 static void gen_##name(DisasContext *ctx) \
1026 { \
1027 TCGv_i64 t0; \
1028 TCGv_i64 t1; \
1029 if (unlikely(!ctx->vsx_enabled)) { \
1030 gen_exception(ctx, POWERPC_EXCP_VSXU); \
1031 return; \
1032 } \
1033 t0 = tcg_temp_new_i64(); \
1034 t1 = tcg_temp_new_i64(); \
1035 get_cpu_vsr(t0, xB(ctx->opcode), true); \
1036 gen_helper_##name(t1, cpu_env, t0); \
1037 set_cpu_vsr(xT(ctx->opcode), t1, true); \
1038 set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \
1039 tcg_temp_free_i64(t0); \
1040 tcg_temp_free_i64(t1); \
1041 }
1042
1043 GEN_VSX_HELPER_X3(xsadddp, 0x00, 0x04, 0, PPC2_VSX)
1044 GEN_VSX_HELPER_R3(xsaddqp, 0x04, 0x00, 0, PPC2_ISA300)
1045 GEN_VSX_HELPER_X3(xssubdp, 0x00, 0x05, 0, PPC2_VSX)
1046 GEN_VSX_HELPER_X3(xsmuldp, 0x00, 0x06, 0, PPC2_VSX)
1047 GEN_VSX_HELPER_R3(xsmulqp, 0x04, 0x01, 0, PPC2_ISA300)
1048 GEN_VSX_HELPER_X3(xsdivdp, 0x00, 0x07, 0, PPC2_VSX)
1049 GEN_VSX_HELPER_R3(xsdivqp, 0x04, 0x11, 0, PPC2_ISA300)
1050 GEN_VSX_HELPER_X2(xsredp, 0x14, 0x05, 0, PPC2_VSX)
1051 GEN_VSX_HELPER_X2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
1052 GEN_VSX_HELPER_X2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
1053 GEN_VSX_HELPER_X2_AB(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
1054 GEN_VSX_HELPER_X1(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
1055 GEN_VSX_HELPER_X3(xscmpeqdp, 0x0C, 0x00, 0, PPC2_ISA300)
1056 GEN_VSX_HELPER_X3(xscmpgtdp, 0x0C, 0x01, 0, PPC2_ISA300)
1057 GEN_VSX_HELPER_X3(xscmpgedp, 0x0C, 0x02, 0, PPC2_ISA300)
1058 GEN_VSX_HELPER_X2_AB(xscmpexpdp, 0x0C, 0x07, 0, PPC2_ISA300)
1059 GEN_VSX_HELPER_R2_AB(xscmpexpqp, 0x04, 0x05, 0, PPC2_ISA300)
1060 GEN_VSX_HELPER_X2_AB(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
1061 GEN_VSX_HELPER_X2_AB(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
1062 GEN_VSX_HELPER_R2_AB(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX)
1063 GEN_VSX_HELPER_R2_AB(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX)
1064 GEN_VSX_HELPER_X3(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
1065 GEN_VSX_HELPER_X3(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
1066 GEN_VSX_HELPER_X2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300)
1067 GEN_VSX_HELPER_X2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
1068 GEN_VSX_HELPER_R2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300)
1069 GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207)
1070 GEN_VSX_HELPER_R2(xscvqpsdz, 0x04, 0x1A, 0x19, PPC2_ISA300)
1071 GEN_VSX_HELPER_R2(xscvqpswz, 0x04, 0x1A, 0x09, PPC2_ISA300)
1072 GEN_VSX_HELPER_R2(xscvqpudz, 0x04, 0x1A, 0x11, PPC2_ISA300)
1073 GEN_VSX_HELPER_R2(xscvqpuwz, 0x04, 0x1A, 0x01, PPC2_ISA300)
1074 GEN_VSX_HELPER_X2(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300)
1075 GEN_VSX_HELPER_R2(xscvsdqp, 0x04, 0x1A, 0x0A, PPC2_ISA300)
1076 GEN_VSX_HELPER_X2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
1077 GEN_VSX_HELPER_XT_XB_ENV(xscvspdpn, 0x16, 0x14, 0, PPC2_VSX207)
1078 GEN_VSX_HELPER_X2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX)
1079 GEN_VSX_HELPER_X2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX)
1080 GEN_VSX_HELPER_X2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX)
1081 GEN_VSX_HELPER_X2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX)
1082 GEN_VSX_HELPER_X2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX)
1083 GEN_VSX_HELPER_R2(xscvudqp, 0x04, 0x1A, 0x02, PPC2_ISA300)
1084 GEN_VSX_HELPER_X2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX)
1085 GEN_VSX_HELPER_X2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX)
1086 GEN_VSX_HELPER_X2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX)
1087 GEN_VSX_HELPER_X2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX)
1088 GEN_VSX_HELPER_X2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
1089 GEN_VSX_HELPER_X2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
1090 GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207)
1091 GEN_VSX_HELPER_R2(xsrqpi, 0x05, 0x00, 0, PPC2_ISA300)
1092 GEN_VSX_HELPER_R2(xsrqpxp, 0x05, 0x01, 0, PPC2_ISA300)
1093 GEN_VSX_HELPER_R2(xssqrtqp, 0x04, 0x19, 0x1B, PPC2_ISA300)
1094 GEN_VSX_HELPER_R3(xssubqp, 0x04, 0x10, 0, PPC2_ISA300)
1095 GEN_VSX_HELPER_X3(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207)
1096 GEN_VSX_HELPER_X3(xssubsp, 0x00, 0x01, 0, PPC2_VSX207)
1097 GEN_VSX_HELPER_X3(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207)
1098 GEN_VSX_HELPER_X3(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207)
1099 GEN_VSX_HELPER_X2(xsresp, 0x14, 0x01, 0, PPC2_VSX207)
1100 GEN_VSX_HELPER_X2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
1101 GEN_VSX_HELPER_X2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
1102 GEN_VSX_HELPER_X2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
1103 GEN_VSX_HELPER_X2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)
1104 GEN_VSX_HELPER_X1(xststdcsp, 0x14, 0x12, 0, PPC2_ISA300)
1105 GEN_VSX_HELPER_2(xststdcdp, 0x14, 0x16, 0, PPC2_ISA300)
1106 GEN_VSX_HELPER_2(xststdcqp, 0x04, 0x16, 0, PPC2_ISA300)
1107
1108 GEN_VSX_HELPER_X3(xvadddp, 0x00, 0x0C, 0, PPC2_VSX)
1109 GEN_VSX_HELPER_X3(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX)
1110 GEN_VSX_HELPER_X3(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX)
1111 GEN_VSX_HELPER_X3(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX)
1112 GEN_VSX_HELPER_X2(xvredp, 0x14, 0x0D, 0, PPC2_VSX)
1113 GEN_VSX_HELPER_X2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX)
1114 GEN_VSX_HELPER_X2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX)
1115 GEN_VSX_HELPER_X2_AB(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX)
1116 GEN_VSX_HELPER_X1(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX)
1117 GEN_VSX_HELPER_X3(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX)
1118 GEN_VSX_HELPER_X3(xvmindp, 0x00, 0x1D, 0, PPC2_VSX)
1119 GEN_VSX_HELPER_X2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX)
1120 GEN_VSX_HELPER_X2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX)
1121 GEN_VSX_HELPER_X2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX)
1122 GEN_VSX_HELPER_X2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX)
1123 GEN_VSX_HELPER_X2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX)
1124 GEN_VSX_HELPER_X2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX)
1125 GEN_VSX_HELPER_X2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX)
1126 GEN_VSX_HELPER_X2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX)
1127 GEN_VSX_HELPER_X2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX)
1128 GEN_VSX_HELPER_X2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX)
1129 GEN_VSX_HELPER_X2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX)
1130 GEN_VSX_HELPER_X2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX)
1131 GEN_VSX_HELPER_X2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX)
1132 GEN_VSX_HELPER_X2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX)
1133
1134 GEN_VSX_HELPER_X3(xvaddsp, 0x00, 0x08, 0, PPC2_VSX)
1135 GEN_VSX_HELPER_X3(xvsubsp, 0x00, 0x09, 0, PPC2_VSX)
1136 GEN_VSX_HELPER_X3(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX)
1137 GEN_VSX_HELPER_X3(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX)
1138 GEN_VSX_HELPER_X2(xvresp, 0x14, 0x09, 0, PPC2_VSX)
1139 GEN_VSX_HELPER_X2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX)
1140 GEN_VSX_HELPER_X2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX)
1141 GEN_VSX_HELPER_X2_AB(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX)
1142 GEN_VSX_HELPER_X1(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX)
1143 GEN_VSX_HELPER_X3(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX)
1144 GEN_VSX_HELPER_X3(xvminsp, 0x00, 0x19, 0, PPC2_VSX)
1145 GEN_VSX_HELPER_X2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX)
1146 GEN_VSX_HELPER_X2(xvcvhpsp, 0x16, 0x1D, 0x18, PPC2_ISA300)
1147 GEN_VSX_HELPER_X2(xvcvsphp, 0x16, 0x1D, 0x19, PPC2_ISA300)
1148 GEN_VSX_HELPER_X2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX)
1149 GEN_VSX_HELPER_X2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX)
1150 GEN_VSX_HELPER_X2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX)
1151 GEN_VSX_HELPER_X2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX)
1152 GEN_VSX_HELPER_X2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX)
1153 GEN_VSX_HELPER_X2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX)
1154 GEN_VSX_HELPER_X2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX)
1155 GEN_VSX_HELPER_X2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX)
1156 GEN_VSX_HELPER_X2(xvrspi, 0x12, 0x08, 0, PPC2_VSX)
1157 GEN_VSX_HELPER_X2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX)
1158 GEN_VSX_HELPER_X2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX)
1159 GEN_VSX_HELPER_X2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
1160 GEN_VSX_HELPER_X2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)
1161 GEN_VSX_HELPER_2(xvtstdcsp, 0x14, 0x1A, 0, PPC2_VSX)
1162 GEN_VSX_HELPER_2(xvtstdcdp, 0x14, 0x1E, 0, PPC2_VSX)
1163
1164 static bool trans_XXPERM(DisasContext *ctx, arg_XX3 *a)
1165 {
1166 TCGv_ptr xt, xa, xb;
1167
1168 REQUIRE_INSNS_FLAGS2(ctx, ISA300);
1169 REQUIRE_VSX(ctx);
1170
1171 xt = gen_vsr_ptr(a->xt);
1172 xa = gen_vsr_ptr(a->xa);
1173 xb = gen_vsr_ptr(a->xb);
1174
1175 gen_helper_VPERM(xt, xa, xt, xb);
1176
1177 tcg_temp_free_ptr(xt);
1178 tcg_temp_free_ptr(xa);
1179 tcg_temp_free_ptr(xb);
1180
1181 return true;
1182 }
1183
1184 static bool trans_XXPERMR(DisasContext *ctx, arg_XX3 *a)
1185 {
1186 TCGv_ptr xt, xa, xb;
1187
1188 REQUIRE_INSNS_FLAGS2(ctx, ISA300);
1189 REQUIRE_VSX(ctx);
1190
1191 xt = gen_vsr_ptr(a->xt);
1192 xa = gen_vsr_ptr(a->xa);
1193 xb = gen_vsr_ptr(a->xb);
1194
1195 gen_helper_VPERMR(xt, xa, xt, xb);
1196
1197 tcg_temp_free_ptr(xt);
1198 tcg_temp_free_ptr(xa);
1199 tcg_temp_free_ptr(xb);
1200
1201 return true;
1202 }
1203
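/*
 * xxpermdi: DM selects which doubleword of XA and of XB forms each half
 * of XT. When XT aliases a source, both doublewords are read into
 * temporaries before writing so no operand is clobbered early.
 */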
1204 static bool trans_XXPERMDI(DisasContext *ctx, arg_XX3_dm *a)
1205 {
1206 TCGv_i64 t0, t1;
1207
1208 REQUIRE_INSNS_FLAGS2(ctx, VSX);
1209 REQUIRE_VSX(ctx);
1210
1211 t0 = tcg_temp_new_i64();
1212
1213 if (unlikely(a->xt == a->xa || a->xt == a->xb)) {
1214 t1 = tcg_temp_new_i64();
1215
1216 get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0);
1217 get_cpu_vsr(t1, a->xb, (a->dm & 1) == 0);
1218
1219 set_cpu_vsr(a->xt, t0, true);
1220 set_cpu_vsr(a->xt, t1, false);
1221
1222 tcg_temp_free_i64(t1);
1223 } else {
1224 get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0);
1225 set_cpu_vsr(a->xt, t0, true);
1226
1227 get_cpu_vsr(t0, a->xb, (a->dm & 1) == 0);
1228 set_cpu_vsr(a->xt, t0, false);
1229 }
1230
1231 tcg_temp_free_i64(t0);
1232
1233 return true;
1234 }
1235
1236 static bool trans_XXPERMX(DisasContext *ctx, arg_8RR_XX4_uim3 *a)
1237 {
1238 TCGv_ptr xt, xa, xb, xc;
1239
1240 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1241 REQUIRE_VSX(ctx);
1242
1243 xt = gen_vsr_ptr(a->xt);
1244 xa = gen_vsr_ptr(a->xa);
1245 xb = gen_vsr_ptr(a->xb);
1246 xc = gen_vsr_ptr(a->xc);
1247
1248 gen_helper_XXPERMX(xt, xa, xb, xc, tcg_constant_tl(a->uim3));
1249
1250 tcg_temp_free_ptr(xt);
1251 tcg_temp_free_ptr(xa);
1252 tcg_temp_free_ptr(xb);
1253 tcg_temp_free_ptr(xc);
1254
1255 return true;
1256 }
1257
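/*
 * xxgenpcv{b,h,w,d}m: the IMM field selects one of four helper variants
 * (big- or little-endian, expansion or compression); any other IMM value
 * is an invalid form.
 */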
1258 #define XXGENPCV(NAME) \
1259 static bool trans_##NAME(DisasContext *ctx, arg_X_imm5 *a) \
1260 { \
1261 TCGv_ptr xt, vrb; \
1262 \
1263 REQUIRE_INSNS_FLAGS2(ctx, ISA310); \
1264 REQUIRE_VSX(ctx); \
1265 \
1266 if (a->imm & ~0x3) { \
1267 gen_invalid(ctx); \
1268 return true; \
1269 } \
1270 \
1271 xt = gen_vsr_ptr(a->xt); \
1272 vrb = gen_avr_ptr(a->vrb); \
1273 \
1274 switch (a->imm) { \
1275 case 0b00000: /* Big-Endian expansion */ \
1276 glue(gen_helper_, glue(NAME, _be_exp))(xt, vrb); \
1277 break; \
1278 case 0b00001: /* Big-Endian compression */ \
1279 glue(gen_helper_, glue(NAME, _be_comp))(xt, vrb); \
1280 break; \
1281 case 0b00010: /* Little-Endian expansion */ \
1282 glue(gen_helper_, glue(NAME, _le_exp))(xt, vrb); \
1283 break; \
1284 case 0b00011: /* Little-Endian compression */ \
1285 glue(gen_helper_, glue(NAME, _le_comp))(xt, vrb); \
1286 break; \
1287 } \
1288 \
1289 tcg_temp_free_ptr(xt); \
1290 tcg_temp_free_ptr(vrb); \
1291 \
1292 return true; \
1293 }
1294
1295 XXGENPCV(XXGENPCVBM)
1296 XXGENPCV(XXGENPCVHM)
1297 XXGENPCV(XXGENPCVWM)
1298 XXGENPCV(XXGENPCVDM)
1299 #undef XXGENPCV
1300
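/*
 * Common scalar fused multiply-add path: the callers below resolve the
 * operand roles, with A-forms reusing the target as a multiplicand and
 * M-forms reusing it as the addend.
 */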
1301 static bool do_xsmadd(DisasContext *ctx, int tgt, int src1, int src2, int src3,
1302 void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
1303 {
1304 TCGv_ptr t, s1, s2, s3;
1305
1306 t = gen_vsr_ptr(tgt);
1307 s1 = gen_vsr_ptr(src1);
1308 s2 = gen_vsr_ptr(src2);
1309 s3 = gen_vsr_ptr(src3);
1310
1311 gen_helper(cpu_env, t, s1, s2, s3);
1312
1313 tcg_temp_free_ptr(t);
1314 tcg_temp_free_ptr(s1);
1315 tcg_temp_free_ptr(s2);
1316 tcg_temp_free_ptr(s3);
1317
1318 return true;
1319 }
1320
1321 static bool do_xsmadd_XX3(DisasContext *ctx, arg_XX3 *a, bool type_a,
1322 void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
1323 {
1324 REQUIRE_VSX(ctx);
1325
1326 if (type_a) {
1327 return do_xsmadd(ctx, a->xt, a->xa, a->xt, a->xb, gen_helper);
1328 }
1329 return do_xsmadd(ctx, a->xt, a->xa, a->xb, a->xt, gen_helper);
1330 }
1331
1332 TRANS_FLAGS2(VSX, XSMADDADP, do_xsmadd_XX3, true, gen_helper_XSMADDDP)
1333 TRANS_FLAGS2(VSX, XSMADDMDP, do_xsmadd_XX3, false, gen_helper_XSMADDDP)
1334 TRANS_FLAGS2(VSX, XSMSUBADP, do_xsmadd_XX3, true, gen_helper_XSMSUBDP)
1335 TRANS_FLAGS2(VSX, XSMSUBMDP, do_xsmadd_XX3, false, gen_helper_XSMSUBDP)
1336 TRANS_FLAGS2(VSX, XSNMADDADP, do_xsmadd_XX3, true, gen_helper_XSNMADDDP)
1337 TRANS_FLAGS2(VSX, XSNMADDMDP, do_xsmadd_XX3, false, gen_helper_XSNMADDDP)
1338 TRANS_FLAGS2(VSX, XSNMSUBADP, do_xsmadd_XX3, true, gen_helper_XSNMSUBDP)
1339 TRANS_FLAGS2(VSX, XSNMSUBMDP, do_xsmadd_XX3, false, gen_helper_XSNMSUBDP)
1340 TRANS_FLAGS2(VSX207, XSMADDASP, do_xsmadd_XX3, true, gen_helper_XSMADDSP)
1341 TRANS_FLAGS2(VSX207, XSMADDMSP, do_xsmadd_XX3, false, gen_helper_XSMADDSP)
1342 TRANS_FLAGS2(VSX207, XSMSUBASP, do_xsmadd_XX3, true, gen_helper_XSMSUBSP)
1343 TRANS_FLAGS2(VSX207, XSMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSMSUBSP)
1344 TRANS_FLAGS2(VSX207, XSNMADDASP, do_xsmadd_XX3, true, gen_helper_XSNMADDSP)
1345 TRANS_FLAGS2(VSX207, XSNMADDMSP, do_xsmadd_XX3, false, gen_helper_XSNMADDSP)
1346 TRANS_FLAGS2(VSX207, XSNMSUBASP, do_xsmadd_XX3, true, gen_helper_XSNMSUBSP)
1347 TRANS_FLAGS2(VSX207, XSNMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSNMSUBSP)
1348
1349 static bool do_xsmadd_X(DisasContext *ctx, arg_X_rc *a,
1350 void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr),
1351 void (*gen_helper_ro)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
1352 {
1353 int vrt, vra, vrb;
1354
1355 REQUIRE_INSNS_FLAGS2(ctx, ISA300);
1356 REQUIRE_VSX(ctx);
1357
1358 vrt = a->rt + 32;
1359 vra = a->ra + 32;
1360 vrb = a->rb + 32;
1361
1362 if (a->rc) {
1363 return do_xsmadd(ctx, vrt, vra, vrt, vrb, gen_helper_ro);
1364 }
1365
1366 return do_xsmadd(ctx, vrt, vra, vrt, vrb, gen_helper);
1367 }
1368
1369 TRANS(XSMADDQP, do_xsmadd_X, gen_helper_XSMADDQP, gen_helper_XSMADDQPO)
1370 TRANS(XSMSUBQP, do_xsmadd_X, gen_helper_XSMSUBQP, gen_helper_XSMSUBQPO)
1371 TRANS(XSNMADDQP, do_xsmadd_X, gen_helper_XSNMADDQP, gen_helper_XSNMADDQPO)
1372 TRANS(XSNMSUBQP, do_xsmadd_X, gen_helper_XSNMSUBQP, gen_helper_XSNMSUBQPO)
1373
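/*
 * Vector multiply-add generators: opcode bit 25 distinguishes the A-form
 * (T = A * T + B) from the M-form (T = A * B + T), so the 'b' and 'c'
 * operand pointers are chosen accordingly before calling the helper.
 */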
1374 #define GEN_VSX_HELPER_VSX_MADD(name, op1, aop, mop, inval, type) \
1375 static void gen_##name(DisasContext *ctx) \
1376 { \
1377 TCGv_ptr xt, xa, b, c; \
1378 if (unlikely(!ctx->vsx_enabled)) { \
1379 gen_exception(ctx, POWERPC_EXCP_VSXU); \
1380 return; \
1381 } \
1382 xt = gen_vsr_ptr(xT(ctx->opcode)); \
1383 xa = gen_vsr_ptr(xA(ctx->opcode)); \
1384 if (ctx->opcode & PPC_BIT32(25)) { \
1385 /* \
1386 * AxT + B \
1387 */ \
1388 b = gen_vsr_ptr(xT(ctx->opcode)); \
1389 c = gen_vsr_ptr(xB(ctx->opcode)); \
1390 } else { \
1391 /* \
1392 * AxB + T \
1393 */ \
1394 b = gen_vsr_ptr(xB(ctx->opcode)); \
1395 c = gen_vsr_ptr(xT(ctx->opcode)); \
1396 } \
1397 gen_helper_##name(cpu_env, xt, xa, b, c); \
1398 tcg_temp_free_ptr(xt); \
1399 tcg_temp_free_ptr(xa); \
1400 tcg_temp_free_ptr(b); \
1401 tcg_temp_free_ptr(c); \
1402 }
1403
1404 GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, PPC2_VSX)
1405 GEN_VSX_HELPER_VSX_MADD(xvmsubdp, 0x04, 0x0E, 0x0F, 0, PPC2_VSX)
1406 GEN_VSX_HELPER_VSX_MADD(xvnmadddp, 0x04, 0x1C, 0x1D, 0, PPC2_VSX)
1407 GEN_VSX_HELPER_VSX_MADD(xvnmsubdp, 0x04, 0x1E, 0x1F, 0, PPC2_VSX)
1408 GEN_VSX_HELPER_VSX_MADD(xvmaddsp, 0x04, 0x08, 0x09, 0, PPC2_VSX)
1409 GEN_VSX_HELPER_VSX_MADD(xvmsubsp, 0x04, 0x0A, 0x0B, 0, PPC2_VSX)
1410 GEN_VSX_HELPER_VSX_MADD(xvnmaddsp, 0x04, 0x18, 0x19, 0, PPC2_VSX)
1411 GEN_VSX_HELPER_VSX_MADD(xvnmsubsp, 0x04, 0x1A, 0x1B, 0, PPC2_VSX)
1412
1413 static void gen_xxbrd(DisasContext *ctx)
1414 {
1415 TCGv_i64 xth;
1416 TCGv_i64 xtl;
1417 TCGv_i64 xbh;
1418 TCGv_i64 xbl;
1419
1420 if (unlikely(!ctx->vsx_enabled)) {
1421 gen_exception(ctx, POWERPC_EXCP_VSXU);
1422 return;
1423 }
1424 xth = tcg_temp_new_i64();
1425 xtl = tcg_temp_new_i64();
1426 xbh = tcg_temp_new_i64();
1427 xbl = tcg_temp_new_i64();
1428 get_cpu_vsr(xbh, xB(ctx->opcode), true);
1429 get_cpu_vsr(xbl, xB(ctx->opcode), false);
1430
1431 tcg_gen_bswap64_i64(xth, xbh);
1432 tcg_gen_bswap64_i64(xtl, xbl);
1433 set_cpu_vsr(xT(ctx->opcode), xth, true);
1434 set_cpu_vsr(xT(ctx->opcode), xtl, false);
1435
1436 tcg_temp_free_i64(xth);
1437 tcg_temp_free_i64(xtl);
1438 tcg_temp_free_i64(xbh);
1439 tcg_temp_free_i64(xbl);
1440 }
1441
1442 static void gen_xxbrh(DisasContext *ctx)
1443 {
1444 TCGv_i64 xth;
1445 TCGv_i64 xtl;
1446 TCGv_i64 xbh;
1447 TCGv_i64 xbl;
1448
1449 if (unlikely(!ctx->vsx_enabled)) {
1450 gen_exception(ctx, POWERPC_EXCP_VSXU);
1451 return;
1452 }
1453 xth = tcg_temp_new_i64();
1454 xtl = tcg_temp_new_i64();
1455 xbh = tcg_temp_new_i64();
1456 xbl = tcg_temp_new_i64();
1457 get_cpu_vsr(xbh, xB(ctx->opcode), true);
1458 get_cpu_vsr(xbl, xB(ctx->opcode), false);
1459
1460 gen_bswap16x8(xth, xtl, xbh, xbl);
1461 set_cpu_vsr(xT(ctx->opcode), xth, true);
1462 set_cpu_vsr(xT(ctx->opcode), xtl, false);
1463
1464 tcg_temp_free_i64(xth);
1465 tcg_temp_free_i64(xtl);
1466 tcg_temp_free_i64(xbh);
1467 tcg_temp_free_i64(xbl);
1468 }
1469
1470 static void gen_xxbrq(DisasContext *ctx)
1471 {
1472 TCGv_i64 xth;
1473 TCGv_i64 xtl;
1474 TCGv_i64 xbh;
1475 TCGv_i64 xbl;
1476 TCGv_i64 t0;
1477
1478 if (unlikely(!ctx->vsx_enabled)) {
1479 gen_exception(ctx, POWERPC_EXCP_VSXU);
1480 return;
1481 }
1482 xth = tcg_temp_new_i64();
1483 xtl = tcg_temp_new_i64();
1484 xbh = tcg_temp_new_i64();
1485 xbl = tcg_temp_new_i64();
1486 get_cpu_vsr(xbh, xB(ctx->opcode), true);
1487 get_cpu_vsr(xbl, xB(ctx->opcode), false);
1488 t0 = tcg_temp_new_i64();
1489
1490 tcg_gen_bswap64_i64(t0, xbl);
1491 tcg_gen_bswap64_i64(xtl, xbh);
1492 set_cpu_vsr(xT(ctx->opcode), xtl, false);
1493 tcg_gen_mov_i64(xth, t0);
1494 set_cpu_vsr(xT(ctx->opcode), xth, true);
1495
1496 tcg_temp_free_i64(t0);
1497 tcg_temp_free_i64(xth);
1498 tcg_temp_free_i64(xtl);
1499 tcg_temp_free_i64(xbh);
1500 tcg_temp_free_i64(xbl);
1501 }
1502
1503 static void gen_xxbrw(DisasContext *ctx)
1504 {
1505 TCGv_i64 xth;
1506 TCGv_i64 xtl;
1507 TCGv_i64 xbh;
1508 TCGv_i64 xbl;
1509
1510 if (unlikely(!ctx->vsx_enabled)) {
1511 gen_exception(ctx, POWERPC_EXCP_VSXU);
1512 return;
1513 }
1514 xth = tcg_temp_new_i64();
1515 xtl = tcg_temp_new_i64();
1516 xbh = tcg_temp_new_i64();
1517 xbl = tcg_temp_new_i64();
1518 get_cpu_vsr(xbh, xB(ctx->opcode), true);
1519 get_cpu_vsr(xbl, xB(ctx->opcode), false);
1520
1521 gen_bswap32x4(xth, xtl, xbh, xbl);
1522 set_cpu_vsr(xT(ctx->opcode), xth, true);
1523 set_cpu_vsr(xT(ctx->opcode), xtl, false);
1524
1525 tcg_temp_free_i64(xth);
1526 tcg_temp_free_i64(xtl);
1527 tcg_temp_free_i64(xbh);
1528 tcg_temp_free_i64(xbl);
1529 }
1530
1531 #define VSX_LOGICAL(name, vece, tcg_op) \
1532 static void glue(gen_, name)(DisasContext *ctx) \
1533 { \
1534 if (unlikely(!ctx->vsx_enabled)) { \
1535 gen_exception(ctx, POWERPC_EXCP_VSXU); \
1536 return; \
1537 } \
1538 tcg_op(vece, vsr_full_offset(xT(ctx->opcode)), \
1539 vsr_full_offset(xA(ctx->opcode)), \
1540 vsr_full_offset(xB(ctx->opcode)), 16, 16); \
1541 }
1542
1543 VSX_LOGICAL(xxland, MO_64, tcg_gen_gvec_and)
1544 VSX_LOGICAL(xxlandc, MO_64, tcg_gen_gvec_andc)
1545 VSX_LOGICAL(xxlor, MO_64, tcg_gen_gvec_or)
1546 VSX_LOGICAL(xxlxor, MO_64, tcg_gen_gvec_xor)
1547 VSX_LOGICAL(xxlnor, MO_64, tcg_gen_gvec_nor)
1548 VSX_LOGICAL(xxleqv, MO_64, tcg_gen_gvec_eqv)
1549 VSX_LOGICAL(xxlnand, MO_64, tcg_gen_gvec_nand)
1550 VSX_LOGICAL(xxlorc, MO_64, tcg_gen_gvec_orc)
1551
1552 #define VSX_XXMRG(name, high) \
1553 static void glue(gen_, name)(DisasContext *ctx) \
1554 { \
1555 TCGv_i64 a0, a1, b0, b1, tmp; \
1556 if (unlikely(!ctx->vsx_enabled)) { \
1557 gen_exception(ctx, POWERPC_EXCP_VSXU); \
1558 return; \
1559 } \
1560 a0 = tcg_temp_new_i64(); \
1561 a1 = tcg_temp_new_i64(); \
1562 b0 = tcg_temp_new_i64(); \
1563 b1 = tcg_temp_new_i64(); \
1564 tmp = tcg_temp_new_i64(); \
1565 get_cpu_vsr(a0, xA(ctx->opcode), high); \
1566 get_cpu_vsr(a1, xA(ctx->opcode), high); \
1567 get_cpu_vsr(b0, xB(ctx->opcode), high); \
1568 get_cpu_vsr(b1, xB(ctx->opcode), high); \
1569 tcg_gen_shri_i64(a0, a0, 32); \
1570 tcg_gen_shri_i64(b0, b0, 32); \
1571 tcg_gen_deposit_i64(tmp, b0, a0, 32, 32); \
1572 set_cpu_vsr(xT(ctx->opcode), tmp, true); \
1573 tcg_gen_deposit_i64(tmp, b1, a1, 32, 32); \
1574 set_cpu_vsr(xT(ctx->opcode), tmp, false); \
1575 tcg_temp_free_i64(a0); \
1576 tcg_temp_free_i64(a1); \
1577 tcg_temp_free_i64(b0); \
1578 tcg_temp_free_i64(b1); \
1579 tcg_temp_free_i64(tmp); \
1580 }
1581
1582 VSX_XXMRG(xxmrghw, 1)
1583 VSX_XXMRG(xxmrglw, 0)
1584
1585 static bool trans_XXSEL(DisasContext *ctx, arg_XX4 *a)
1586 {
1587 REQUIRE_INSNS_FLAGS2(ctx, VSX);
1588 REQUIRE_VSX(ctx);
1589
1590 tcg_gen_gvec_bitsel(MO_64, vsr_full_offset(a->xt), vsr_full_offset(a->xc),
1591 vsr_full_offset(a->xb), vsr_full_offset(a->xa), 16, 16);
1592
1593 return true;
1594 }
1595
1596 static bool trans_XXSPLTW(DisasContext *ctx, arg_XX2 *a)
1597 {
1598 int tofs, bofs;
1599
1600 REQUIRE_VSX(ctx);
1601
1602 tofs = vsr_full_offset(a->xt);
1603 bofs = vsr_full_offset(a->xb);
1604 bofs += a->uim << MO_32;
1605 #ifndef HOST_WORDS_BIG_ENDIAN
1606 bofs ^= 8 | 4;
1607 #endif
1608
1609 tcg_gen_gvec_dup_mem(MO_32, tofs, bofs, 16, 16);
1610 return true;
1611 }
1612
1613 #define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))
1614
1615 static bool trans_XXSPLTIB(DisasContext *ctx, arg_X_imm8 *a)
1616 {
1617 if (a->xt < 32) {
1618 REQUIRE_VSX(ctx);
1619 } else {
1620 REQUIRE_VECTOR(ctx);
1621 }
1622 tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(a->xt), 16, 16, a->imm);
1623 return true;
1624 }
1625
1626 static bool trans_XXSPLTIW(DisasContext *ctx, arg_8RR_D *a)
1627 {
1628 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1629 REQUIRE_VSX(ctx);
1630
1631 tcg_gen_gvec_dup_imm(MO_32, vsr_full_offset(a->xt), 16, 16, a->si);
1632
1633 return true;
1634 }
1635
1636 static bool trans_XXSPLTIDP(DisasContext *ctx, arg_8RR_D *a)
1637 {
1638 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1639 REQUIRE_VSX(ctx);
1640
1641 tcg_gen_gvec_dup_imm(MO_64, vsr_full_offset(a->xt), 16, 16,
1642 helper_todouble(a->si));
1643 return true;
1644 }
1645
1646 static bool trans_XXSPLTI32DX(DisasContext *ctx, arg_8RR_D_IX *a)
1647 {
1648 TCGv_i32 imm;
1649
1650 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1651 REQUIRE_VSX(ctx);
1652
1653 imm = tcg_constant_i32(a->si);
1654
1655 tcg_gen_st_i32(imm, cpu_env,
1656 offsetof(CPUPPCState, vsr[a->xt].VsrW(0 + a->ix)));
1657 tcg_gen_st_i32(imm, cpu_env,
1658 offsetof(CPUPPCState, vsr[a->xt].VsrW(2 + a->ix)));
1659
1660 return true;
1661 }
1662
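/*
 * lxvkq: load one of a fixed set of quad-precision constants selected by
 * UIM; table entries left at zero are unspecified encodings and are
 * treated as invalid forms.
 */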
1663 static bool trans_LXVKQ(DisasContext *ctx, arg_X_uim5 *a)
1664 {
1665 static const uint64_t values[32] = {
1666 0, /* Unspecified */
1667 0x3FFF000000000000llu, /* QP +1.0 */
1668 0x4000000000000000llu, /* QP +2.0 */
1669 0x4000800000000000llu, /* QP +3.0 */
1670 0x4001000000000000llu, /* QP +4.0 */
1671 0x4001400000000000llu, /* QP +5.0 */
1672 0x4001800000000000llu, /* QP +6.0 */
1673 0x4001C00000000000llu, /* QP +7.0 */
1674 0x7FFF000000000000llu, /* QP +Inf */
1675 0x7FFF800000000000llu, /* QP dQNaN */
1676 0, /* Unspecified */
1677 0, /* Unspecified */
1678 0, /* Unspecified */
1679 0, /* Unspecified */
1680 0, /* Unspecified */
1681 0, /* Unspecified */
1682 0x8000000000000000llu, /* QP -0.0 */
1683 0xBFFF000000000000llu, /* QP -1.0 */
1684 0xC000000000000000llu, /* QP -2.0 */
1685 0xC000800000000000llu, /* QP -3.0 */
1686 0xC001000000000000llu, /* QP -4.0 */
1687 0xC001400000000000llu, /* QP -5.0 */
1688 0xC001800000000000llu, /* QP -6.0 */
1689 0xC001C00000000000llu, /* QP -7.0 */
1690 0xFFFF000000000000llu, /* QP -Inf */
1691 };
1692
1693 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1694 REQUIRE_VSX(ctx);
1695
1696 if (values[a->uim]) {
1697 set_cpu_vsr(a->xt, tcg_constant_i64(0x0), false);
1698 set_cpu_vsr(a->xt, tcg_constant_i64(values[a->uim]), true);
1699 } else {
1700 gen_invalid(ctx);
1701 }
1702
1703 return true;
1704 }
1705
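/*
 * xvtlsbb: test the least-significant bit of every byte of VSR[XB] and
 * set CR[BF] to report "all ones" (LT position) and "all zeros" (EQ
 * position), leaving the other bits clear.
 */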
1706 static bool trans_XVTLSBB(DisasContext *ctx, arg_XX2_bf_xb *a)
1707 {
1708 TCGv_i64 xb, t0, t1, all_true, all_false, mask, zero;
1709
1710 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1711 REQUIRE_VSX(ctx);
1712
1713 xb = tcg_temp_new_i64();
1714 t0 = tcg_temp_new_i64();
1715 t1 = tcg_temp_new_i64();
1716 all_true = tcg_temp_new_i64();
1717 all_false = tcg_temp_new_i64();
1718 mask = tcg_constant_i64(dup_const(MO_8, 1));
1719 zero = tcg_constant_i64(0);
1720
1721 get_cpu_vsr(xb, a->xb, true);
1722 tcg_gen_and_i64(t0, mask, xb);
1723 get_cpu_vsr(xb, a->xb, false);
1724 tcg_gen_and_i64(t1, mask, xb);
1725
1726 tcg_gen_or_i64(all_false, t0, t1);
1727 tcg_gen_and_i64(all_true, t0, t1);
1728
1729 tcg_gen_setcond_i64(TCG_COND_EQ, all_false, all_false, zero);
1730 tcg_gen_shli_i64(all_false, all_false, 1);
1731 tcg_gen_setcond_i64(TCG_COND_EQ, all_true, all_true, mask);
1732 tcg_gen_shli_i64(all_true, all_true, 3);
1733
1734 tcg_gen_or_i64(t0, all_false, all_true);
1735 tcg_gen_extrl_i64_i32(cpu_crf[a->bf], t0);
1736
1737 tcg_temp_free_i64(xb);
1738 tcg_temp_free_i64(t0);
1739 tcg_temp_free_i64(t1);
1740 tcg_temp_free_i64(all_true);
1741 tcg_temp_free_i64(all_false);
1742
1743 return true;
1744 }
1745
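/*
 * xxsldwi: concatenate XA:XB and take the 128-bit window shifted left by
 * SHW words (0-3) as the result for XT.
 */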
1746 static void gen_xxsldwi(DisasContext *ctx)
1747 {
1748 TCGv_i64 xth, xtl;
1749 if (unlikely(!ctx->vsx_enabled)) {
1750 gen_exception(ctx, POWERPC_EXCP_VSXU);
1751 return;
1752 }
1753 xth = tcg_temp_new_i64();
1754 xtl = tcg_temp_new_i64();
1755
1756 switch (SHW(ctx->opcode)) {
1757 case 0: {
1758 get_cpu_vsr(xth, xA(ctx->opcode), true);
1759 get_cpu_vsr(xtl, xA(ctx->opcode), false);
1760 break;
1761 }
1762 case 1: {
1763 TCGv_i64 t0 = tcg_temp_new_i64();
1764 get_cpu_vsr(xth, xA(ctx->opcode), true);
1765 tcg_gen_shli_i64(xth, xth, 32);
1766 get_cpu_vsr(t0, xA(ctx->opcode), false);
1767 tcg_gen_shri_i64(t0, t0, 32);
1768 tcg_gen_or_i64(xth, xth, t0);
1769 get_cpu_vsr(xtl, xA(ctx->opcode), false);
1770 tcg_gen_shli_i64(xtl, xtl, 32);
1771 get_cpu_vsr(t0, xB(ctx->opcode), true);
1772 tcg_gen_shri_i64(t0, t0, 32);
1773 tcg_gen_or_i64(xtl, xtl, t0);
1774 tcg_temp_free_i64(t0);
1775 break;
1776 }
1777 case 2: {
1778 get_cpu_vsr(xth, xA(ctx->opcode), false);
1779 get_cpu_vsr(xtl, xB(ctx->opcode), true);
1780 break;
1781 }
1782 case 3: {
1783 TCGv_i64 t0 = tcg_temp_new_i64();
1784 get_cpu_vsr(xth, xA(ctx->opcode), false);
1785 tcg_gen_shli_i64(xth, xth, 32);
1786 get_cpu_vsr(t0, xB(ctx->opcode), true);
1787 tcg_gen_shri_i64(t0, t0, 32);
1788 tcg_gen_or_i64(xth, xth, t0);
1789 get_cpu_vsr(xtl, xB(ctx->opcode), true);
1790 tcg_gen_shli_i64(xtl, xtl, 32);
1791 get_cpu_vsr(t0, xB(ctx->opcode), false);
1792 tcg_gen_shri_i64(t0, t0, 32);
1793 tcg_gen_or_i64(xtl, xtl, t0);
1794 tcg_temp_free_i64(t0);
1795 break;
1796 }
1797 }
1798
1799 set_cpu_vsr(xT(ctx->opcode), xth, true);
1800 set_cpu_vsr(xT(ctx->opcode), xtl, false);
1801
1802 tcg_temp_free_i64(xth);
1803 tcg_temp_free_i64(xtl);
1804 }
1805
1806 #define VSX_EXTRACT_INSERT(name) \
1807 static void gen_##name(DisasContext *ctx) \
1808 { \
1809 TCGv_ptr xt, xb; \
1810 TCGv_i32 t0; \
1811 TCGv_i64 t1; \
1812 uint8_t uimm = UIMM4(ctx->opcode); \
1813 \
1814 if (unlikely(!ctx->vsx_enabled)) { \
1815 gen_exception(ctx, POWERPC_EXCP_VSXU); \
1816 return; \
1817 } \
1818 xt = gen_vsr_ptr(xT(ctx->opcode)); \
1819 xb = gen_vsr_ptr(xB(ctx->opcode)); \
1820 t0 = tcg_temp_new_i32(); \
1821 t1 = tcg_temp_new_i64(); \
1822 /* \
1823 * uimm > 15 out of bound and for \
1824 * uimm > 12 handle as per hardware in helper \
1825 */ \
1826 if (uimm > 15) { \
1827 tcg_gen_movi_i64(t1, 0); \
1828 set_cpu_vsr(xT(ctx->opcode), t1, true); \
1829 set_cpu_vsr(xT(ctx->opcode), t1, false); \
1830 return; \
1831 } \
1832 tcg_gen_movi_i32(t0, uimm); \
1833 gen_helper_##name(cpu_env, xt, xb, t0); \
1834 tcg_temp_free_ptr(xb); \
1835 tcg_temp_free_ptr(xt); \
1836 tcg_temp_free_i32(t0); \
1837 tcg_temp_free_i64(t1); \
1838 }
1839
1840 VSX_EXTRACT_INSERT(xxextractuw)
1841 VSX_EXTRACT_INSERT(xxinsertw)
1842
1843 #ifdef TARGET_PPC64
1844 static void gen_xsxexpdp(DisasContext *ctx)
1845 {
1846 TCGv rt = cpu_gpr[rD(ctx->opcode)];
1847 TCGv_i64 t0;
1848 if (unlikely(!ctx->vsx_enabled)) {
1849 gen_exception(ctx, POWERPC_EXCP_VSXU);
1850 return;
1851 }
1852 t0 = tcg_temp_new_i64();
1853 get_cpu_vsr(t0, xB(ctx->opcode), true);
1854 tcg_gen_extract_i64(rt, t0, 52, 11);
1855 tcg_temp_free_i64(t0);
1856 }
1857
1858 static void gen_xsxexpqp(DisasContext *ctx)
1859 {
1860 TCGv_i64 xth;
1861 TCGv_i64 xtl;
1862 TCGv_i64 xbh;
1863
1864 if (unlikely(!ctx->vsx_enabled)) {
1865 gen_exception(ctx, POWERPC_EXCP_VSXU);
1866 return;
1867 }
1868 xth = tcg_temp_new_i64();
1869 xtl = tcg_temp_new_i64();
1870 xbh = tcg_temp_new_i64();
1871 get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
1872
1873 tcg_gen_extract_i64(xth, xbh, 48, 15);
1874 set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
1875 tcg_gen_movi_i64(xtl, 0);
1876 set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
1877
1878 tcg_temp_free_i64(xbh);
1879 tcg_temp_free_i64(xth);
1880 tcg_temp_free_i64(xtl);
1881 }
1882
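/*
 * xsiexpdp: combine the sign and fraction bits of GPR[RA] with the low
 * 11 bits of GPR[RB] as exponent; doubleword 1 of the target is zeroed.
 */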
1883 static void gen_xsiexpdp(DisasContext *ctx)
1884 {
1885 TCGv_i64 xth;
1886 TCGv ra = cpu_gpr[rA(ctx->opcode)];
1887 TCGv rb = cpu_gpr[rB(ctx->opcode)];
1888 TCGv_i64 t0;
1889
1890 if (unlikely(!ctx->vsx_enabled)) {
1891 gen_exception(ctx, POWERPC_EXCP_VSXU);
1892 return;
1893 }
1894 t0 = tcg_temp_new_i64();
1895 xth = tcg_temp_new_i64();
1896 tcg_gen_andi_i64(xth, ra, 0x800FFFFFFFFFFFFF);
1897 tcg_gen_andi_i64(t0, rb, 0x7FF);
1898 tcg_gen_shli_i64(t0, t0, 52);
1899 tcg_gen_or_i64(xth, xth, t0);
1900 set_cpu_vsr(xT(ctx->opcode), xth, true);
1901 set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false);
1902 tcg_temp_free_i64(t0);
1903 tcg_temp_free_i64(xth);
1904 }
1905
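/*
 * xsiexpqp: replace the 15-bit exponent of the quad-precision value in
 * VSR[VRA+32] with the low 15 bits of doubleword 0 of VSR[VRB+32]; the
 * sign and fraction bits are copied unchanged.
 */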
1906 static void gen_xsiexpqp(DisasContext *ctx)
1907 {
1908 TCGv_i64 xth;
1909 TCGv_i64 xtl;
1910 TCGv_i64 xah;
1911 TCGv_i64 xal;
1912 TCGv_i64 xbh;
1913 TCGv_i64 t0;
1914
1915 if (unlikely(!ctx->vsx_enabled)) {
1916 gen_exception(ctx, POWERPC_EXCP_VSXU);
1917 return;
1918 }
1919 xth = tcg_temp_new_i64();
1920 xtl = tcg_temp_new_i64();
1921 xah = tcg_temp_new_i64();
1922 xal = tcg_temp_new_i64();
1923 get_cpu_vsr(xah, rA(ctx->opcode) + 32, true);
1924 get_cpu_vsr(xal, rA(ctx->opcode) + 32, false);
1925 xbh = tcg_temp_new_i64();
1926 get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
1927 t0 = tcg_temp_new_i64();
1928
1929 tcg_gen_andi_i64(xth, xah, 0x8000FFFFFFFFFFFF);
1930 tcg_gen_andi_i64(t0, xbh, 0x7FFF);
1931 tcg_gen_shli_i64(t0, t0, 48);
1932 tcg_gen_or_i64(xth, xth, t0);
1933 set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
1934 tcg_gen_mov_i64(xtl, xal);
1935 set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
1936
1937 tcg_temp_free_i64(t0);
1938 tcg_temp_free_i64(xth);
1939 tcg_temp_free_i64(xtl);
1940 tcg_temp_free_i64(xah);
1941 tcg_temp_free_i64(xal);
1942 tcg_temp_free_i64(xbh);
1943 }
1944
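/*
 * xsxsigdp: extract the significand of the double in doubleword 0 of
 * VSR[XB] into GPR[RT]. The implicit integer bit (2^52) is set unless
 * the biased exponent is 0 (zero/denormal) or 2047 (infinity/NaN).
 */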
1945 static void gen_xsxsigdp(DisasContext *ctx)
1946 {
1947 TCGv rt = cpu_gpr[rD(ctx->opcode)];
1948 TCGv_i64 t0, t1, zr, nan, exp;
1949
1950 if (unlikely(!ctx->vsx_enabled)) {
1951 gen_exception(ctx, POWERPC_EXCP_VSXU);
1952 return;
1953 }
1954 exp = tcg_temp_new_i64();
1955 t0 = tcg_temp_new_i64();
1956 t1 = tcg_temp_new_i64();
1957 zr = tcg_const_i64(0);
1958 nan = tcg_const_i64(2047);
1959
1960 get_cpu_vsr(t1, xB(ctx->opcode), true);
1961 tcg_gen_extract_i64(exp, t1, 52, 11);
1962 tcg_gen_movi_i64(t0, 0x0010000000000000);
1963 tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
1964 tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
1965 get_cpu_vsr(t1, xB(ctx->opcode), true);
1966 tcg_gen_deposit_i64(rt, t0, t1, 0, 52);
1967
1968 tcg_temp_free_i64(t0);
1969 tcg_temp_free_i64(t1);
1970 tcg_temp_free_i64(exp);
1971 tcg_temp_free_i64(zr);
1972 tcg_temp_free_i64(nan);
1973 }
1974
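/*
 * xsxsigqp: quad-precision variant of xsxsigdp. The implicit bit (bit 48
 * of the high doubleword) is set unless the exponent is 0 or 32767, and
 * the 112 fraction bits are copied through.
 */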
1975 static void gen_xsxsigqp(DisasContext *ctx)
1976 {
1977 TCGv_i64 t0, zr, nan, exp;
1978 TCGv_i64 xth;
1979 TCGv_i64 xtl;
1980 TCGv_i64 xbh;
1981 TCGv_i64 xbl;
1982
1983 if (unlikely(!ctx->vsx_enabled)) {
1984 gen_exception(ctx, POWERPC_EXCP_VSXU);
1985 return;
1986 }
1987 xth = tcg_temp_new_i64();
1988 xtl = tcg_temp_new_i64();
1989 xbh = tcg_temp_new_i64();
1990 xbl = tcg_temp_new_i64();
1991 get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
1992 get_cpu_vsr(xbl, rB(ctx->opcode) + 32, false);
1993 exp = tcg_temp_new_i64();
1994 t0 = tcg_temp_new_i64();
1995 zr = tcg_const_i64(0);
1996 nan = tcg_const_i64(32767);
1997
1998 tcg_gen_extract_i64(exp, xbh, 48, 15);
1999 tcg_gen_movi_i64(t0, 0x0001000000000000);
2000 tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
2001 tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
2002 tcg_gen_deposit_i64(xth, t0, xbh, 0, 48);
2003 set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
2004 tcg_gen_mov_i64(xtl, xbl);
2005 set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
2006
2007 tcg_temp_free_i64(t0);
2008 tcg_temp_free_i64(exp);
2009 tcg_temp_free_i64(zr);
2010 tcg_temp_free_i64(nan);
2011 tcg_temp_free_i64(xth);
2012 tcg_temp_free_i64(xtl);
2013 tcg_temp_free_i64(xbh);
2014 tcg_temp_free_i64(xbl);
2015 }
2016 #endif
2017
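/*
 * xviexpsp: for each single-precision word, keep the sign and fraction
 * from VSR[XA] and insert the 8-bit exponent taken from the low byte of
 * the corresponding word of VSR[XB]; each 64-bit mask covers two words.
 */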
2018 static void gen_xviexpsp(DisasContext *ctx)
2019 {
2020 TCGv_i64 xth;
2021 TCGv_i64 xtl;
2022 TCGv_i64 xah;
2023 TCGv_i64 xal;
2024 TCGv_i64 xbh;
2025 TCGv_i64 xbl;
2026 TCGv_i64 t0;
2027
2028 if (unlikely(!ctx->vsx_enabled)) {
2029 gen_exception(ctx, POWERPC_EXCP_VSXU);
2030 return;
2031 }
2032 xth = tcg_temp_new_i64();
2033 xtl = tcg_temp_new_i64();
2034 xah = tcg_temp_new_i64();
2035 xal = tcg_temp_new_i64();
2036 xbh = tcg_temp_new_i64();
2037 xbl = tcg_temp_new_i64();
2038 get_cpu_vsr(xah, xA(ctx->opcode), true);
2039 get_cpu_vsr(xal, xA(ctx->opcode), false);
2040 get_cpu_vsr(xbh, xB(ctx->opcode), true);
2041 get_cpu_vsr(xbl, xB(ctx->opcode), false);
2042 t0 = tcg_temp_new_i64();
2043
2044 tcg_gen_andi_i64(xth, xah, 0x807FFFFF807FFFFF);
2045 tcg_gen_andi_i64(t0, xbh, 0xFF000000FF);
2046 tcg_gen_shli_i64(t0, t0, 23);
2047 tcg_gen_or_i64(xth, xth, t0);
2048 set_cpu_vsr(xT(ctx->opcode), xth, true);
2049 tcg_gen_andi_i64(xtl, xal, 0x807FFFFF807FFFFF);
2050 tcg_gen_andi_i64(t0, xbl, 0xFF000000FF);
2051 tcg_gen_shli_i64(t0, t0, 23);
2052 tcg_gen_or_i64(xtl, xtl, t0);
2053 set_cpu_vsr(xT(ctx->opcode), xtl, false);
2054
2055 tcg_temp_free_i64(t0);
2056 tcg_temp_free_i64(xth);
2057 tcg_temp_free_i64(xtl);
2058 tcg_temp_free_i64(xah);
2059 tcg_temp_free_i64(xal);
2060 tcg_temp_free_i64(xbh);
2061 tcg_temp_free_i64(xbl);
2062 }
2063
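/*
 * xviexpdp: for each doubleword, deposit the low 11 bits of VSR[XB]
 * into the exponent field (bits 62:52) of the value from VSR[XA].
 */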
2064 static void gen_xviexpdp(DisasContext *ctx)
2065 {
2066 TCGv_i64 xth;
2067 TCGv_i64 xtl;
2068 TCGv_i64 xah;
2069 TCGv_i64 xal;
2070 TCGv_i64 xbh;
2071 TCGv_i64 xbl;
2072
2073 if (unlikely(!ctx->vsx_enabled)) {
2074 gen_exception(ctx, POWERPC_EXCP_VSXU);
2075 return;
2076 }
2077 xth = tcg_temp_new_i64();
2078 xtl = tcg_temp_new_i64();
2079 xah = tcg_temp_new_i64();
2080 xal = tcg_temp_new_i64();
2081 xbh = tcg_temp_new_i64();
2082 xbl = tcg_temp_new_i64();
2083 get_cpu_vsr(xah, xA(ctx->opcode), true);
2084 get_cpu_vsr(xal, xA(ctx->opcode), false);
2085 get_cpu_vsr(xbh, xB(ctx->opcode), true);
2086 get_cpu_vsr(xbl, xB(ctx->opcode), false);
2087
2088 tcg_gen_deposit_i64(xth, xah, xbh, 52, 11);
2089 set_cpu_vsr(xT(ctx->opcode), xth, true);
2090
2091 tcg_gen_deposit_i64(xtl, xal, xbl, 52, 11);
2092 set_cpu_vsr(xT(ctx->opcode), xtl, false);
2093
2094 tcg_temp_free_i64(xth);
2095 tcg_temp_free_i64(xtl);
2096 tcg_temp_free_i64(xah);
2097 tcg_temp_free_i64(xal);
2098 tcg_temp_free_i64(xbh);
2099 tcg_temp_free_i64(xbl);
2100 }
2101
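/*
 * xvxexpsp: extract the 8-bit exponent of each single-precision word of
 * VSR[XB] into the corresponding word of VSR[XT].
 */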
2102 static void gen_xvxexpsp(DisasContext *ctx)
2103 {
2104 TCGv_i64 xth;
2105 TCGv_i64 xtl;
2106 TCGv_i64 xbh;
2107 TCGv_i64 xbl;
2108
2109 if (unlikely(!ctx->vsx_enabled)) {
2110 gen_exception(ctx, POWERPC_EXCP_VSXU);
2111 return;
2112 }
2113 xth = tcg_temp_new_i64();
2114 xtl = tcg_temp_new_i64();
2115 xbh = tcg_temp_new_i64();
2116 xbl = tcg_temp_new_i64();
2117 get_cpu_vsr(xbh, xB(ctx->opcode), true);
2118 get_cpu_vsr(xbl, xB(ctx->opcode), false);
2119
2120 tcg_gen_shri_i64(xth, xbh, 23);
2121 tcg_gen_andi_i64(xth, xth, 0xFF000000FF);
2122 set_cpu_vsr(xT(ctx->opcode), xth, true);
2123 tcg_gen_shri_i64(xtl, xbl, 23);
2124 tcg_gen_andi_i64(xtl, xtl, 0xFF000000FF);
2125 set_cpu_vsr(xT(ctx->opcode), xtl, false);
2126
2127 tcg_temp_free_i64(xth);
2128 tcg_temp_free_i64(xtl);
2129 tcg_temp_free_i64(xbh);
2130 tcg_temp_free_i64(xbl);
2131 }
2132
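/*
 * xvxexpdp: extract the 11-bit exponent of each doubleword of VSR[XB]
 * into the corresponding doubleword of VSR[XT].
 */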
2133 static void gen_xvxexpdp(DisasContext *ctx)
2134 {
2135 TCGv_i64 xth;
2136 TCGv_i64 xtl;
2137 TCGv_i64 xbh;
2138 TCGv_i64 xbl;
2139
2140 if (unlikely(!ctx->vsx_enabled)) {
2141 gen_exception(ctx, POWERPC_EXCP_VSXU);
2142 return;
2143 }
2144 xth = tcg_temp_new_i64();
2145 xtl = tcg_temp_new_i64();
2146 xbh = tcg_temp_new_i64();
2147 xbl = tcg_temp_new_i64();
2148 get_cpu_vsr(xbh, xB(ctx->opcode), true);
2149 get_cpu_vsr(xbl, xB(ctx->opcode), false);
2150
2151 tcg_gen_extract_i64(xth, xbh, 52, 11);
2152 set_cpu_vsr(xT(ctx->opcode), xth, true);
2153 tcg_gen_extract_i64(xtl, xbl, 52, 11);
2154 set_cpu_vsr(xT(ctx->opcode), xtl, false);
2155
2156 tcg_temp_free_i64(xth);
2157 tcg_temp_free_i64(xtl);
2158 tcg_temp_free_i64(xbh);
2159 tcg_temp_free_i64(xbl);
2160 }
2161
2162 GEN_VSX_HELPER_X2(xvxsigsp, 0x00, 0x04, 0, PPC2_ISA300)
2163
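/*
 * xvxsigdp: per-doubleword version of xsxsigdp; the implicit bit is set
 * unless the exponent of that doubleword is 0 or 2047.
 */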
2164 static void gen_xvxsigdp(DisasContext *ctx)
2165 {
2166 TCGv_i64 xth;
2167 TCGv_i64 xtl;
2168 TCGv_i64 xbh;
2169 TCGv_i64 xbl;
2170 TCGv_i64 t0, zr, nan, exp;
2171
2172 if (unlikely(!ctx->vsx_enabled)) {
2173 gen_exception(ctx, POWERPC_EXCP_VSXU);
2174 return;
2175 }
2176 xth = tcg_temp_new_i64();
2177 xtl = tcg_temp_new_i64();
2178 xbh = tcg_temp_new_i64();
2179 xbl = tcg_temp_new_i64();
2180 get_cpu_vsr(xbh, xB(ctx->opcode), true);
2181 get_cpu_vsr(xbl, xB(ctx->opcode), false);
2182 exp = tcg_temp_new_i64();
2183 t0 = tcg_temp_new_i64();
2184 zr = tcg_const_i64(0);
2185 nan = tcg_const_i64(2047);
2186
2187 tcg_gen_extract_i64(exp, xbh, 52, 11);
2188 tcg_gen_movi_i64(t0, 0x0010000000000000);
2189 tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
2190 tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
2191 tcg_gen_deposit_i64(xth, t0, xbh, 0, 52);
2192 set_cpu_vsr(xT(ctx->opcode), xth, true);
2193
2194 tcg_gen_extract_i64(exp, xbl, 52, 11);
2195 tcg_gen_movi_i64(t0, 0x0010000000000000);
2196 tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
2197 tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
2198 tcg_gen_deposit_i64(xtl, t0, xbl, 0, 52);
2199 set_cpu_vsr(xT(ctx->opcode), xtl, false);
2200
2201 tcg_temp_free_i64(t0);
2202 tcg_temp_free_i64(exp);
2203 tcg_temp_free_i64(zr);
2204 tcg_temp_free_i64(nan);
2205 tcg_temp_free_i64(xth);
2206 tcg_temp_free_i64(xtl);
2207 tcg_temp_free_i64(xbh);
2208 tcg_temp_free_i64(xbl);
2209 }
2210
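/*
 * Common translation for the lxv/stxv families: EA = (RA|0) + displ,
 * then one (or, for the paired forms, two) 16-byte vectors are moved as
 * 64-bit accesses. In little-endian mode the low doubleword is accessed
 * first and the register order of a pair is swapped.
 */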
2211 static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ,
2212 int rt, bool store, bool paired)
2213 {
2214 TCGv ea;
2215 TCGv_i64 xt;
2216 MemOp mop;
2217 int rt1, rt2;
2218
2219 xt = tcg_temp_new_i64();
2220
2221 mop = DEF_MEMOP(MO_UQ);
2222
2223 gen_set_access_type(ctx, ACCESS_INT);
2224 ea = do_ea_calc(ctx, ra, displ);
2225
2226 if (paired && ctx->le_mode) {
2227 rt1 = rt + 1;
2228 rt2 = rt;
2229 } else {
2230 rt1 = rt;
2231 rt2 = rt + 1;
2232 }
2233
2234 if (store) {
2235 get_cpu_vsr(xt, rt1, !ctx->le_mode);
2236 tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
2237 gen_addr_add(ctx, ea, ea, 8);
2238 get_cpu_vsr(xt, rt1, ctx->le_mode);
2239 tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
2240 if (paired) {
2241 gen_addr_add(ctx, ea, ea, 8);
2242 get_cpu_vsr(xt, rt2, !ctx->le_mode);
2243 tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
2244 gen_addr_add(ctx, ea, ea, 8);
2245 get_cpu_vsr(xt, rt2, ctx->le_mode);
2246 tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
2247 }
2248 } else {
2249 tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
2250 set_cpu_vsr(rt1, xt, !ctx->le_mode);
2251 gen_addr_add(ctx, ea, ea, 8);
2252 tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
2253 set_cpu_vsr(rt1, xt, ctx->le_mode);
2254 if (paired) {
2255 gen_addr_add(ctx, ea, ea, 8);
2256 tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
2257 set_cpu_vsr(rt2, xt, !ctx->le_mode);
2258 gen_addr_add(ctx, ea, ea, 8);
2259 tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
2260 set_cpu_vsr(rt2, xt, ctx->le_mode);
2261 }
2262 }
2263
2264 tcg_temp_free(ea);
2265 tcg_temp_free_i64(xt);
2266 return true;
2267 }
2268
2269 static bool do_lstxv_D(DisasContext *ctx, arg_D *a, bool store, bool paired)
2270 {
2271 if (paired || a->rt >= 32) {
2272 REQUIRE_VSX(ctx);
2273 } else {
2274 REQUIRE_VECTOR(ctx);
2275 }
2276
2277 return do_lstxv(ctx, a->ra, tcg_constant_tl(a->si), a->rt, store, paired);
2278 }
2279
2280 static bool do_lstxv_PLS_D(DisasContext *ctx, arg_PLS_D *a,
2281 bool store, bool paired)
2282 {
2283 arg_D d;
2284 REQUIRE_VSX(ctx);
2285
2286 if (!resolve_PLS_D(ctx, &d, a)) {
2287 return true;
2288 }
2289
2290 return do_lstxv(ctx, d.ra, tcg_constant_tl(d.si), d.rt, store, paired);
2291 }
2292
2293 static bool do_lstxv_X(DisasContext *ctx, arg_X *a, bool store, bool paired)
2294 {
2295 if (paired || a->rt >= 32) {
2296 REQUIRE_VSX(ctx);
2297 } else {
2298 REQUIRE_VECTOR(ctx);
2299 }
2300
2301 return do_lstxv(ctx, a->ra, cpu_gpr[a->rb], a->rt, store, paired);
2302 }
2303
2304 TRANS_FLAGS2(ISA300, STXV, do_lstxv_D, true, false)
2305 TRANS_FLAGS2(ISA300, LXV, do_lstxv_D, false, false)
2306 TRANS_FLAGS2(ISA310, STXVP, do_lstxv_D, true, true)
2307 TRANS_FLAGS2(ISA310, LXVP, do_lstxv_D, false, true)
2308 TRANS_FLAGS2(ISA300, STXVX, do_lstxv_X, true, false)
2309 TRANS_FLAGS2(ISA300, LXVX, do_lstxv_X, false, false)
2310 TRANS_FLAGS2(ISA310, STXVPX, do_lstxv_X, true, true)
2311 TRANS_FLAGS2(ISA310, LXVPX, do_lstxv_X, false, true)
2312 TRANS64_FLAGS2(ISA310, PSTXV, do_lstxv_PLS_D, true, false)
2313 TRANS64_FLAGS2(ISA310, PLXV, do_lstxv_PLS_D, false, false)
2314 TRANS64_FLAGS2(ISA310, PSTXVP, do_lstxv_PLS_D, true, true)
2315 TRANS64_FLAGS2(ISA310, PLXVP, do_lstxv_PLS_D, false, true)
2316
2317 static void gen_xxeval_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c,
2318 int64_t imm)
2319 {
2320 /*
2321 * Instead of processing imm bit-by-bit, we'll skip the computation of
2322 * conjunctions whose corresponding bit is unset.
2323 */
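/*
 * For example, imm = 0b00010001 runs the loop twice, with bit = 7
 * (conjunction A & B & C) and bit = 3 (~A & B & C), so the result is
 * B & C, matching the and(C,B) case in trans_XXEVAL below.
 */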
2324 int bit;
2325 TCGv_i64 conj, disj;
2326
2327 conj = tcg_temp_new_i64();
2328 disj = tcg_const_i64(0);
2329
2330 /* Iterate over set bits from the least to the most significant bit */
2331 while (imm) {
2332 /*
2333 * Get the next bit to be processed with ctz64. Invert the result of
2334 * ctz64 to match the indexing used by PowerISA.
2335 */
2336 bit = 7 - ctz64(imm);
2337 if (bit & 0x4) {
2338 tcg_gen_mov_i64(conj, a);
2339 } else {
2340 tcg_gen_not_i64(conj, a);
2341 }
2342 if (bit & 0x2) {
2343 tcg_gen_and_i64(conj, conj, b);
2344 } else {
2345 tcg_gen_andc_i64(conj, conj, b);
2346 }
2347 if (bit & 0x1) {
2348 tcg_gen_and_i64(conj, conj, c);
2349 } else {
2350 tcg_gen_andc_i64(conj, conj, c);
2351 }
2352 tcg_gen_or_i64(disj, disj, conj);
2353
2354 /* Unset the least significant bit that is set */
2355 imm &= imm - 1;
2356 }
2357
2358 tcg_gen_mov_i64(t, disj);
2359
2360 tcg_temp_free_i64(conj);
2361 tcg_temp_free_i64(disj);
2362 }
2363
2364 static void gen_xxeval_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
2365 TCGv_vec c, int64_t imm)
2366 {
2367 /*
2368 * Instead of processing imm bit-by-bit, we'll skip the computation of
2369 * conjunctions whose corresponding bit is unset.
2370 */
2371 int bit;
2372 TCGv_vec disj, conj;
2373
2374 disj = tcg_const_zeros_vec_matching(t);
2375 conj = tcg_temp_new_vec_matching(t);
2376
2377 /* Iterate over set bits from the least to the most significant bit */
2378 while (imm) {
2379 /*
2380 * Get the next bit to be processed with ctz64. Invert the result of
2381 * ctz64 to match the indexing used by PowerISA.
2382 */
2383 bit = 7 - ctz64(imm);
2384 if (bit & 0x4) {
2385 tcg_gen_mov_vec(conj, a);
2386 } else {
2387 tcg_gen_not_vec(vece, conj, a);
2388 }
2389 if (bit & 0x2) {
2390 tcg_gen_and_vec(vece, conj, conj, b);
2391 } else {
2392 tcg_gen_andc_vec(vece, conj, conj, b);
2393 }
2394 if (bit & 0x1) {
2395 tcg_gen_and_vec(vece, conj, conj, c);
2396 } else {
2397 tcg_gen_andc_vec(vece, conj, conj, c);
2398 }
2399 tcg_gen_or_vec(vece, disj, disj, conj);
2400
2401 /* Unset the least significant bit that is set */
2402 imm &= imm - 1;
2403 }
2404
2405 tcg_gen_mov_vec(t, disj);
2406
2407 tcg_temp_free_vec(disj);
2408 tcg_temp_free_vec(conj);
2409 }
2410
2411 static bool trans_XXEVAL(DisasContext *ctx, arg_8RR_XX4_imm *a)
2412 {
2413 static const TCGOpcode vecop_list[] = {
2414 INDEX_op_andc_vec, 0
2415 };
2416 static const GVecGen4i op = {
2417 .fniv = gen_xxeval_vec,
2418 .fno = gen_helper_XXEVAL,
2419 .fni8 = gen_xxeval_i64,
2420 .opt_opc = vecop_list,
2421 .vece = MO_64
2422 };
2423 int xt = vsr_full_offset(a->xt), xa = vsr_full_offset(a->xa),
2424 xb = vsr_full_offset(a->xb), xc = vsr_full_offset(a->xc);
2425
2426 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2427 REQUIRE_VSX(ctx);
2428
2429 /* Equivalent functions that can be implemented with a single gen_gvec */
2430 switch (a->imm) {
2431 case 0b00000000: /* false */
2432 set_cpu_vsr(a->xt, tcg_constant_i64(0), true);
2433 set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
2434 break;
2435 case 0b00000011: /* and(B,A) */
2436 tcg_gen_gvec_and(MO_64, xt, xb, xa, 16, 16);
2437 break;
2438 case 0b00000101: /* and(C,A) */
2439 tcg_gen_gvec_and(MO_64, xt, xc, xa, 16, 16);
2440 break;
2441 case 0b00001111: /* A */
2442 tcg_gen_gvec_mov(MO_64, xt, xa, 16, 16);
2443 break;
2444 case 0b00010001: /* and(C,B) */
2445 tcg_gen_gvec_and(MO_64, xt, xc, xb, 16, 16);
2446 break;
2447 case 0b00011011: /* C?B:A */
2448 tcg_gen_gvec_bitsel(MO_64, xt, xc, xb, xa, 16, 16);
2449 break;
2450 case 0b00011101: /* B?C:A */
2451 tcg_gen_gvec_bitsel(MO_64, xt, xb, xc, xa, 16, 16);
2452 break;
2453 case 0b00100111: /* C?A:B */
2454 tcg_gen_gvec_bitsel(MO_64, xt, xc, xa, xb, 16, 16);
2455 break;
2456 case 0b00110011: /* B */
2457 tcg_gen_gvec_mov(MO_64, xt, xb, 16, 16);
2458 break;
2459 case 0b00110101: /* A?C:B */
2460 tcg_gen_gvec_bitsel(MO_64, xt, xa, xc, xb, 16, 16);
2461 break;
2462 case 0b00111100: /* xor(B,A) */
2463 tcg_gen_gvec_xor(MO_64, xt, xb, xa, 16, 16);
2464 break;
2465 case 0b00111111: /* or(B,A) */
2466 tcg_gen_gvec_or(MO_64, xt, xb, xa, 16, 16);
2467 break;
2468 case 0b01000111: /* B?A:C */
2469 tcg_gen_gvec_bitsel(MO_64, xt, xb, xa, xc, 16, 16);
2470 break;
2471 case 0b01010011: /* A?B:C */
2472 tcg_gen_gvec_bitsel(MO_64, xt, xa, xb, xc, 16, 16);
2473 break;
2474 case 0b01010101: /* C */
2475 tcg_gen_gvec_mov(MO_64, xt, xc, 16, 16);
2476 break;
2477 case 0b01011010: /* xor(C,A) */
2478 tcg_gen_gvec_xor(MO_64, xt, xc, xa, 16, 16);
2479 break;
2480 case 0b01011111: /* or(C,A) */
2481 tcg_gen_gvec_or(MO_64, xt, xc, xa, 16, 16);
2482 break;
2483 case 0b01100110: /* xor(C,B) */
2484 tcg_gen_gvec_xor(MO_64, xt, xc, xb, 16, 16);
2485 break;
2486 case 0b01110111: /* or(C,B) */
2487 tcg_gen_gvec_or(MO_64, xt, xc, xb, 16, 16);
2488 break;
2489 case 0b10001000: /* nor(C,B) */
2490 tcg_gen_gvec_nor(MO_64, xt, xc, xb, 16, 16);
2491 break;
2492 case 0b10011001: /* eqv(C,B) */
2493 tcg_gen_gvec_eqv(MO_64, xt, xc, xb, 16, 16);
2494 break;
2495 case 0b10100000: /* nor(C,A) */
2496 tcg_gen_gvec_nor(MO_64, xt, xc, xa, 16, 16);
2497 break;
2498 case 0b10100101: /* eqv(C,A) */
2499 tcg_gen_gvec_eqv(MO_64, xt, xc, xa, 16, 16);
2500 break;
2501 case 0b10101010: /* not(C) */
2502 tcg_gen_gvec_not(MO_64, xt, xc, 16, 16);
2503 break;
2504 case 0b11000000: /* nor(B,A) */
2505 tcg_gen_gvec_nor(MO_64, xt, xb, xa, 16, 16);
2506 break;
2507 case 0b11000011: /* eqv(B,A) */
2508 tcg_gen_gvec_eqv(MO_64, xt, xb, xa, 16, 16);
2509 break;
2510 case 0b11001100: /* not(B) */
2511 tcg_gen_gvec_not(MO_64, xt, xb, 16, 16);
2512 break;
2513 case 0b11101110: /* nand(C,B) */
2514 tcg_gen_gvec_nand(MO_64, xt, xc, xb, 16, 16);
2515 break;
2516 case 0b11110000: /* not(A) */
2517 tcg_gen_gvec_not(MO_64, xt, xa, 16, 16);
2518 break;
2519 case 0b11111010: /* nand(C,A) */
2520 tcg_gen_gvec_nand(MO_64, xt, xc, xa, 16, 16);
2521 break;
2522 case 0b11111100: /* nand(B,A) */
2523 tcg_gen_gvec_nand(MO_64, xt, xb, xa, 16, 16);
2524 break;
2525 case 0b11111111: /* true */
2526 set_cpu_vsr(a->xt, tcg_constant_i64(-1), true);
2527 set_cpu_vsr(a->xt, tcg_constant_i64(-1), false);
2528 break;
2529 default:
2530 /* Fallback to compute all conjunctions/disjunctions */
2531 tcg_gen_gvec_4i(xt, xa, xb, xc, 16, 16, a->imm, &op);
2532 }
2533
2534 return true;
2535 }
2536
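/*
 * xxblendv*: each element of the result is taken from VSR[XB] when the
 * most significant bit of the corresponding element of VSR[XC] is set,
 * and from VSR[XA] otherwise; the arithmetic shift turns that bit into
 * a full element-wide mask for bitsel.
 */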
2537 static void gen_xxblendv_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
2538 TCGv_vec c)
2539 {
2540 TCGv_vec tmp = tcg_temp_new_vec_matching(c);
2541 tcg_gen_sari_vec(vece, tmp, c, (8 << vece) - 1);
2542 tcg_gen_bitsel_vec(vece, t, tmp, b, a);
2543 tcg_temp_free_vec(tmp);
2544 }
2545
2546 static bool do_xxblendv(DisasContext *ctx, arg_8RR_XX4 *a, unsigned vece)
2547 {
2548 static const TCGOpcode vecop_list[] = {
2549 INDEX_op_sari_vec, 0
2550 };
2551 static const GVecGen4 ops[4] = {
2552 {
2553 .fniv = gen_xxblendv_vec,
2554 .fno = gen_helper_XXBLENDVB,
2555 .opt_opc = vecop_list,
2556 .vece = MO_8
2557 },
2558 {
2559 .fniv = gen_xxblendv_vec,
2560 .fno = gen_helper_XXBLENDVH,
2561 .opt_opc = vecop_list,
2562 .vece = MO_16
2563 },
2564 {
2565 .fniv = gen_xxblendv_vec,
2566 .fno = gen_helper_XXBLENDVW,
2567 .opt_opc = vecop_list,
2568 .vece = MO_32
2569 },
2570 {
2571 .fniv = gen_xxblendv_vec,
2572 .fno = gen_helper_XXBLENDVD,
2573 .opt_opc = vecop_list,
2574 .vece = MO_64
2575 }
2576 };
2577
2578 REQUIRE_VSX(ctx);
2579
2580 tcg_gen_gvec_4(vsr_full_offset(a->xt), vsr_full_offset(a->xa),
2581 vsr_full_offset(a->xb), vsr_full_offset(a->xc),
2582 16, 16, &ops[vece]);
2583
2584 return true;
2585 }
2586
2587 TRANS(XXBLENDVB, do_xxblendv, MO_8)
2588 TRANS(XXBLENDVH, do_xxblendv, MO_16)
2589 TRANS(XXBLENDVW, do_xxblendv, MO_32)
2590 TRANS(XXBLENDVD, do_xxblendv, MO_64)
2591
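/*
 * Common handler for the ISA 3.0 scalar double-precision minimum/maximum
 * "type-C" and "type-J" instructions, all implemented in helpers.
 */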
2592 static bool do_xsmaxmincjdp(DisasContext *ctx, arg_XX3 *a,
2593 void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
2594 {
2595 TCGv_ptr xt, xa, xb;
2596
2597 REQUIRE_INSNS_FLAGS2(ctx, ISA300);
2598 REQUIRE_VSX(ctx);
2599
2600 xt = gen_vsr_ptr(a->xt);
2601 xa = gen_vsr_ptr(a->xa);
2602 xb = gen_vsr_ptr(a->xb);
2603
2604 helper(cpu_env, xt, xa, xb);
2605
2606 tcg_temp_free_ptr(xt);
2607 tcg_temp_free_ptr(xa);
2608 tcg_temp_free_ptr(xb);
2609
2610 return true;
2611 }
2612
2613 TRANS(XSMAXCDP, do_xsmaxmincjdp, gen_helper_xsmaxcdp)
2614 TRANS(XSMINCDP, do_xsmaxmincjdp, gen_helper_xsmincdp)
2615 TRANS(XSMAXJDP, do_xsmaxmincjdp, gen_helper_xsmaxjdp)
2616 TRANS(XSMINJDP, do_xsmaxmincjdp, gen_helper_xsminjdp)
2617
2618 #undef GEN_XX2FORM
2619 #undef GEN_XX3FORM
2620 #undef GEN_XX2IFORM
2621 #undef GEN_XX3_RC_FORM
2622 #undef GEN_XX3FORM_DM
2623 #undef VSX_LOGICAL