target-ppc: improve stxvw4x implementation
target-ppc/translate/vsx-impl.inc.c
/*** VSX extension ***/

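/*
 * VSRs 0-31 share their high doubleword with the FPRs and keep their
 * low doubleword in cpu_vsr[]; VSRs 32-63 alias the Altivec registers
 * (cpu_avrh/cpu_avrl).  These helpers return the TCG global backing
 * the requested half of VSR n.
 */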
static inline TCGv_i64 cpu_vsrh(int n)
{
    if (n < 32) {
        return cpu_fpr[n];
    } else {
        return cpu_avrh[n-32];
    }
}

static inline TCGv_i64 cpu_vsrl(int n)
{
    if (n < 32) {
        return cpu_vsr[n];
    } else {
        return cpu_avrl[n-32];
    }
}

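/*
 * Scalar loads: check VSX availability, compute the effective address
 * and load into the high doubleword of xT.  The low doubleword is not
 * written (see the NOTE in the macro body).
 */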
#define VSX_LOAD_SCALAR(name, operation)                      \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    TCGv EA;                                                  \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                     \
    EA = tcg_temp_new();                                      \
    gen_addr_reg_index(ctx, EA);                              \
    gen_qemu_##operation(ctx, cpu_vsrh(xT(ctx->opcode)), EA); \
    /* NOTE: cpu_vsrl is undefined */                         \
    tcg_temp_free(EA);                                        \
}

VSX_LOAD_SCALAR(lxsdx, ld64_i64)
VSX_LOAD_SCALAR(lxsiwax, ld32s_i64)
VSX_LOAD_SCALAR(lxsibzx, ld8u_i64)
VSX_LOAD_SCALAR(lxsihzx, ld16u_i64)
VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64)
VSX_LOAD_SCALAR(lxsspx, ld32fs)

static void gen_lxvd2x(DisasContext *ctx)
{
    TCGv EA;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
    tcg_gen_addi_tl(EA, EA, 8);
    gen_qemu_ld64_i64(ctx, cpu_vsrl(xT(ctx->opcode)), EA);
    tcg_temp_free(EA);
}

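/* lxvdsx: load one doubleword and splat it into both halves of xT. */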
static void gen_lxvdsx(DisasContext *ctx)
{
    TCGv EA;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));
    tcg_temp_free(EA);
}

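/*
 * lxvw4x loads four word elements.  In little-endian mode each
 * doubleword is fetched with a single 8-byte LE access and the two
 * words inside it are then swapped with a shift-and-deposit, which
 * reproduces the architected (big-endian) element order.
 */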
static void gen_lxvw4x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
    TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();

    gen_addr_reg_index(ctx, EA);
    if (ctx->le_mode) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
        tcg_gen_shri_i64(t1, t0, 32);
        tcg_gen_deposit_i64(xth, t1, t0, 32, 32);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
        tcg_gen_shri_i64(t1, t0, 32);
        tcg_gen_deposit_i64(xtl, t1, t0, 32, 32);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    } else {
        tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
    }
    tcg_temp_free(EA);
}

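/* Scalar stores: the value is taken from the high doubleword of xS. */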
#define VSX_STORE_SCALAR(name, operation)                     \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    TCGv EA;                                                  \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                     \
    EA = tcg_temp_new();                                      \
    gen_addr_reg_index(ctx, EA);                              \
    gen_qemu_##operation(ctx, cpu_vsrh(xS(ctx->opcode)), EA); \
    tcg_temp_free(EA);                                        \
}

VSX_STORE_SCALAR(stxsdx, st64_i64)

VSX_STORE_SCALAR(stxsibx, st8_i64)
VSX_STORE_SCALAR(stxsihx, st16_i64)
VSX_STORE_SCALAR(stxsiwx, st32_i64)
VSX_STORE_SCALAR(stxsspx, st32fs)

static void gen_stxvd2x(DisasContext *ctx)
{
    TCGv EA;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_st64_i64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
    tcg_gen_addi_tl(EA, EA, 8);
    gen_qemu_st64_i64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
    tcg_temp_free(EA);
}

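/*
 * stxvw4x: the inverse of lxvw4x.  In little-endian mode the two words
 * of each doubleword are swapped into a temporary and stored with a
 * single 8-byte LE access.
 */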
static void gen_stxvw4x(DisasContext *ctx)
{
    TCGv_i64 xsh = cpu_vsrh(xS(ctx->opcode));
    TCGv_i64 xsl = cpu_vsrl(xS(ctx->opcode));
    TCGv EA;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    if (ctx->le_mode) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        tcg_gen_shri_i64(t0, xsh, 32);
        tcg_gen_deposit_i64(t1, t0, xsh, 32, 32);
        tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_shri_i64(t0, xsl, 32);
        tcg_gen_deposit_i64(t1, t0, xsl, 32, 32);
        tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    } else {
        tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
    }
    tcg_temp_free(EA);
}

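/*
 * Word moves between a GPR and a VSR.  The availability check depends
 * on which half of the VSX register file is addressed: FP for VSRs
 * 0-31, Altivec for VSRs 32-63.
 */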
#define MV_VSRW(name, tcgop1, tcgop2, target, source)         \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    if (xS(ctx->opcode) < 32) {                               \
        if (unlikely(!ctx->fpu_enabled)) {                    \
            gen_exception(ctx, POWERPC_EXCP_FPU);             \
            return;                                           \
        }                                                     \
    } else {                                                  \
        if (unlikely(!ctx->altivec_enabled)) {                \
            gen_exception(ctx, POWERPC_EXCP_VPU);             \
            return;                                           \
        }                                                     \
    }                                                         \
    TCGv_i64 tmp = tcg_temp_new_i64();                        \
    tcg_gen_##tcgop1(tmp, source);                            \
    tcg_gen_##tcgop2(target, tmp);                            \
    tcg_temp_free_i64(tmp);                                   \
}

MV_VSRW(mfvsrwz, ext32u_i64, trunc_i64_tl, cpu_gpr[rA(ctx->opcode)], \
        cpu_vsrh(xS(ctx->opcode)))
MV_VSRW(mtvsrwa, extu_tl_i64, ext32s_i64, cpu_vsrh(xT(ctx->opcode)), \
        cpu_gpr[rA(ctx->opcode)])
MV_VSRW(mtvsrwz, extu_tl_i64, ext32u_i64, cpu_vsrh(xT(ctx->opcode)), \
        cpu_gpr[rA(ctx->opcode)])

#if defined(TARGET_PPC64)
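/* Doubleword moves between a GPR and a VSR (64-bit targets only). */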
#define MV_VSRD(name, target, source)                         \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    if (xS(ctx->opcode) < 32) {                               \
        if (unlikely(!ctx->fpu_enabled)) {                    \
            gen_exception(ctx, POWERPC_EXCP_FPU);             \
            return;                                           \
        }                                                     \
    } else {                                                  \
        if (unlikely(!ctx->altivec_enabled)) {                \
            gen_exception(ctx, POWERPC_EXCP_VPU);             \
            return;                                           \
        }                                                     \
    }                                                         \
    tcg_gen_mov_i64(target, source);                          \
}

MV_VSRD(mfvsrd, cpu_gpr[rA(ctx->opcode)], cpu_vsrh(xS(ctx->opcode)))
MV_VSRD(mtvsrd, cpu_vsrh(xT(ctx->opcode)), cpu_gpr[rA(ctx->opcode)])

static void gen_mfvsrld(DisasContext *ctx)
{
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }

    tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], cpu_vsrl(xS(ctx->opcode)));
}

static void gen_mtvsrdd(DisasContext *ctx)
{
    if (xT(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }

    if (!rA(ctx->opcode)) {
        tcg_gen_movi_i64(cpu_vsrh(xT(ctx->opcode)), 0);
    } else {
        tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_gpr[rA(ctx->opcode)]);
    }

    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_gpr[rB(ctx->opcode)]);
}

#endif

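/*
 * xxpermdi: DM bit 1 selects which doubleword of xA goes to the high
 * half of xT, DM bit 0 which doubleword of xB goes to the low half.
 * When xT aliases xA or xB the result is staged in temporaries so a
 * source is not clobbered before it is read.
 */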
static void gen_xxpermdi(DisasContext *ctx)
{
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }

    if (unlikely((xT(ctx->opcode) == xA(ctx->opcode)) ||
                 (xT(ctx->opcode) == xB(ctx->opcode)))) {
        TCGv_i64 xh, xl;

        xh = tcg_temp_new_i64();
        xl = tcg_temp_new_i64();

        if ((DM(ctx->opcode) & 2) == 0) {
            tcg_gen_mov_i64(xh, cpu_vsrh(xA(ctx->opcode)));
        } else {
            tcg_gen_mov_i64(xh, cpu_vsrl(xA(ctx->opcode)));
        }
        if ((DM(ctx->opcode) & 1) == 0) {
            tcg_gen_mov_i64(xl, cpu_vsrh(xB(ctx->opcode)));
        } else {
            tcg_gen_mov_i64(xl, cpu_vsrl(xB(ctx->opcode)));
        }

        tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xh);
        tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xl);

        tcg_temp_free_i64(xh);
        tcg_temp_free_i64(xl);
    } else {
        if ((DM(ctx->opcode) & 2) == 0) {
            tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)));
        } else {
            tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)));
        }
        if ((DM(ctx->opcode) & 1) == 0) {
            tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xB(ctx->opcode)));
        } else {
            tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xB(ctx->opcode)));
        }
    }
}

#define OP_ABS 1
#define OP_NABS 2
#define OP_NEG 3
#define OP_CPSGN 4
#define SGN_MASK_DP 0x8000000000000000ull
#define SGN_MASK_SP 0x8000000080000000ull

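/*
 * Sign-bit manipulation on the bits selected by sgn_mask: OP_ABS
 * clears the sign, OP_NABS sets it, OP_NEG flips it and OP_CPSGN
 * copies it from xA.
 */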
#define VSX_SCALAR_MOVE(name, op, sgn_mask)                   \
static void glue(gen_, name)(DisasContext *ctx)               \
{                                                             \
    TCGv_i64 xb, sgm;                                         \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    xb = tcg_temp_new_i64();                                  \
    sgm = tcg_temp_new_i64();                                 \
    tcg_gen_mov_i64(xb, cpu_vsrh(xB(ctx->opcode)));           \
    tcg_gen_movi_i64(sgm, sgn_mask);                          \
    switch (op) {                                             \
    case OP_ABS: {                                            \
        tcg_gen_andc_i64(xb, xb, sgm);                        \
        break;                                                \
    }                                                         \
    case OP_NABS: {                                           \
        tcg_gen_or_i64(xb, xb, sgm);                          \
        break;                                                \
    }                                                         \
    case OP_NEG: {                                            \
        tcg_gen_xor_i64(xb, xb, sgm);                         \
        break;                                                \
    }                                                         \
    case OP_CPSGN: {                                          \
        TCGv_i64 xa = tcg_temp_new_i64();                     \
        tcg_gen_mov_i64(xa, cpu_vsrh(xA(ctx->opcode)));       \
        tcg_gen_and_i64(xa, xa, sgm);                         \
        tcg_gen_andc_i64(xb, xb, sgm);                        \
        tcg_gen_or_i64(xb, xb, xa);                           \
        tcg_temp_free_i64(xa);                                \
        break;                                                \
    }                                                         \
    }                                                         \
    tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xb);           \
    tcg_temp_free_i64(xb);                                    \
    tcg_temp_free_i64(sgm);                                   \
}

VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP)
VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP)
VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP)
VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP)

#define VSX_VECTOR_MOVE(name, op, sgn_mask)                   \
static void glue(gen_, name)(DisasContext *ctx)               \
{                                                             \
    TCGv_i64 xbh, xbl, sgm;                                   \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    xbh = tcg_temp_new_i64();                                 \
    xbl = tcg_temp_new_i64();                                 \
    sgm = tcg_temp_new_i64();                                 \
    tcg_gen_mov_i64(xbh, cpu_vsrh(xB(ctx->opcode)));          \
    tcg_gen_mov_i64(xbl, cpu_vsrl(xB(ctx->opcode)));          \
    tcg_gen_movi_i64(sgm, sgn_mask);                          \
    switch (op) {                                             \
    case OP_ABS: {                                            \
        tcg_gen_andc_i64(xbh, xbh, sgm);                      \
        tcg_gen_andc_i64(xbl, xbl, sgm);                      \
        break;                                                \
    }                                                         \
    case OP_NABS: {                                           \
        tcg_gen_or_i64(xbh, xbh, sgm);                        \
        tcg_gen_or_i64(xbl, xbl, sgm);                        \
        break;                                                \
    }                                                         \
    case OP_NEG: {                                            \
        tcg_gen_xor_i64(xbh, xbh, sgm);                       \
        tcg_gen_xor_i64(xbl, xbl, sgm);                       \
        break;                                                \
    }                                                         \
    case OP_CPSGN: {                                          \
        TCGv_i64 xah = tcg_temp_new_i64();                    \
        TCGv_i64 xal = tcg_temp_new_i64();                    \
        tcg_gen_mov_i64(xah, cpu_vsrh(xA(ctx->opcode)));      \
        tcg_gen_mov_i64(xal, cpu_vsrl(xA(ctx->opcode)));      \
        tcg_gen_and_i64(xah, xah, sgm);                       \
        tcg_gen_and_i64(xal, xal, sgm);                       \
        tcg_gen_andc_i64(xbh, xbh, sgm);                      \
        tcg_gen_andc_i64(xbl, xbl, sgm);                      \
        tcg_gen_or_i64(xbh, xbh, xah);                        \
        tcg_gen_or_i64(xbl, xbl, xal);                        \
        tcg_temp_free_i64(xah);                               \
        tcg_temp_free_i64(xal);                               \
        break;                                                \
    }                                                         \
    }                                                         \
    tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xbh);          \
    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xbl);          \
    tcg_temp_free_i64(xbh);                                   \
    tcg_temp_free_i64(xbl);                                   \
    tcg_temp_free_i64(sgm);                                   \
}

VSX_VECTOR_MOVE(xvabsdp, OP_ABS, SGN_MASK_DP)
VSX_VECTOR_MOVE(xvnabsdp, OP_NABS, SGN_MASK_DP)
VSX_VECTOR_MOVE(xvnegdp, OP_NEG, SGN_MASK_DP)
VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP)
VSX_VECTOR_MOVE(xvabssp, OP_ABS, SGN_MASK_SP)
VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP)
VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP)
VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP)

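/*
 * Arithmetic, compare, convert and round operations are implemented
 * out of line.  GEN_VSX_HELPER_2 passes the raw opcode to a helper
 * that decodes the operand registers itself; GEN_VSX_HELPER_XT_XB_ENV
 * instead passes the operand halves directly.
 */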
#define GEN_VSX_HELPER_2(name, op1, op2, inval, type)         \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    TCGv_i32 opc;                                             \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    opc = tcg_const_i32(ctx->opcode);                         \
    gen_helper_##name(cpu_env, opc);                          \
    tcg_temp_free_i32(opc);                                   \
}

#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    gen_helper_##name(cpu_vsrh(xT(ctx->opcode)), cpu_env,     \
                      cpu_vsrh(xB(ctx->opcode)));             \
}

GEN_VSX_HELPER_2(xsadddp, 0x00, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xssubdp, 0x00, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmuldp, 0x00, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsdivdp, 0x00, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsredp, 0x14, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmaddadp, 0x04, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmaddmdp, 0x04, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmsubadp, 0x04, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmsubmdp, 0x04, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsnmaddadp, 0x04, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsnmaddmdp, 0x04, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsnmsubadp, 0x04, 0x16, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsnmsubmdp, 0x04, 0x17, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xscvspdpn, 0x16, 0x14, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207)

GEN_VSX_HELPER_2(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xssubsp, 0x00, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsresp, 0x14, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmaddasp, 0x04, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmaddmsp, 0x04, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmsubasp, 0x04, 0x02, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmsubmsp, 0x04, 0x03, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsnmaddasp, 0x04, 0x10, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsnmaddmsp, 0x04, 0x11, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsnmsubasp, 0x04, 0x12, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsnmsubmsp, 0x04, 0x13, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)

GEN_VSX_HELPER_2(xvadddp, 0x00, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvredp, 0x14, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaddadp, 0x04, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaddmdp, 0x04, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmsubadp, 0x04, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmsubmdp, 0x04, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmaddadp, 0x04, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmaddmdp, 0x04, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmsubadp, 0x04, 0x1E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmsubmdp, 0x04, 0x1F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmindp, 0x00, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX)

GEN_VSX_HELPER_2(xvaddsp, 0x00, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvsubsp, 0x00, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvresp, 0x14, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaddasp, 0x04, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaddmsp, 0x04, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmsubasp, 0x04, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmsubmsp, 0x04, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmaddasp, 0x04, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmaddmsp, 0x04, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmsubasp, 0x04, 0x1A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmsubmsp, 0x04, 0x1B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvminsp, 0x00, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspi, 0x12, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)

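/* Logical operations act independently on the two VSR doublewords. */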
#define VSX_LOGICAL(name, tcg_op)                                \
static void glue(gen_, name)(DisasContext *ctx)                  \
{                                                                \
    if (unlikely(!ctx->vsx_enabled)) {                           \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                   \
        return;                                                  \
    }                                                            \
    tcg_op(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)), \
           cpu_vsrh(xB(ctx->opcode)));                           \
    tcg_op(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)), \
           cpu_vsrl(xB(ctx->opcode)));                           \
}

VSX_LOGICAL(xxland, tcg_gen_and_i64)
VSX_LOGICAL(xxlandc, tcg_gen_andc_i64)
VSX_LOGICAL(xxlor, tcg_gen_or_i64)
VSX_LOGICAL(xxlxor, tcg_gen_xor_i64)
VSX_LOGICAL(xxlnor, tcg_gen_nor_i64)
VSX_LOGICAL(xxleqv, tcg_gen_eqv_i64)
VSX_LOGICAL(xxlnand, tcg_gen_nand_i64)
VSX_LOGICAL(xxlorc, tcg_gen_orc_i64)

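/*
 * xxmrghw/xxmrglw interleave the word pairs of the high (resp. low)
 * doublewords of xA and xB into xT.
 */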
#define VSX_XXMRG(name, high)                                 \
static void glue(gen_, name)(DisasContext *ctx)               \
{                                                             \
    TCGv_i64 a0, a1, b0, b1;                                  \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    a0 = tcg_temp_new_i64();                                  \
    a1 = tcg_temp_new_i64();                                  \
    b0 = tcg_temp_new_i64();                                  \
    b1 = tcg_temp_new_i64();                                  \
    if (high) {                                               \
        tcg_gen_mov_i64(a0, cpu_vsrh(xA(ctx->opcode)));       \
        tcg_gen_mov_i64(a1, cpu_vsrh(xA(ctx->opcode)));       \
        tcg_gen_mov_i64(b0, cpu_vsrh(xB(ctx->opcode)));       \
        tcg_gen_mov_i64(b1, cpu_vsrh(xB(ctx->opcode)));       \
    } else {                                                  \
        tcg_gen_mov_i64(a0, cpu_vsrl(xA(ctx->opcode)));       \
        tcg_gen_mov_i64(a1, cpu_vsrl(xA(ctx->opcode)));       \
        tcg_gen_mov_i64(b0, cpu_vsrl(xB(ctx->opcode)));       \
        tcg_gen_mov_i64(b1, cpu_vsrl(xB(ctx->opcode)));       \
    }                                                         \
    tcg_gen_shri_i64(a0, a0, 32);                             \
    tcg_gen_shri_i64(b0, b0, 32);                             \
    tcg_gen_deposit_i64(cpu_vsrh(xT(ctx->opcode)),            \
                        b0, a0, 32, 32);                      \
    tcg_gen_deposit_i64(cpu_vsrl(xT(ctx->opcode)),            \
                        b1, a1, 32, 32);                      \
    tcg_temp_free_i64(a0);                                    \
    tcg_temp_free_i64(a1);                                    \
    tcg_temp_free_i64(b0);                                    \
    tcg_temp_free_i64(b1);                                    \
}

VSX_XXMRG(xxmrghw, 1)
VSX_XXMRG(xxmrglw, 0)

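/* xxsel: bitwise select, xT = (xA & ~xC) | (xB & xC). */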
static void gen_xxsel(DisasContext *ctx)
{
    TCGv_i64 a, b, c;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    a = tcg_temp_new_i64();
    b = tcg_temp_new_i64();
    c = tcg_temp_new_i64();

    tcg_gen_mov_i64(a, cpu_vsrh(xA(ctx->opcode)));
    tcg_gen_mov_i64(b, cpu_vsrh(xB(ctx->opcode)));
    tcg_gen_mov_i64(c, cpu_vsrh(xC(ctx->opcode)));

    tcg_gen_and_i64(b, b, c);
    tcg_gen_andc_i64(a, a, c);
    tcg_gen_or_i64(cpu_vsrh(xT(ctx->opcode)), a, b);

    tcg_gen_mov_i64(a, cpu_vsrl(xA(ctx->opcode)));
    tcg_gen_mov_i64(b, cpu_vsrl(xB(ctx->opcode)));
    tcg_gen_mov_i64(c, cpu_vsrl(xC(ctx->opcode)));

    tcg_gen_and_i64(b, b, c);
    tcg_gen_andc_i64(a, a, c);
    tcg_gen_or_i64(cpu_vsrl(xT(ctx->opcode)), a, b);

    tcg_temp_free_i64(a);
    tcg_temp_free_i64(b);
    tcg_temp_free_i64(c);
}

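/*
 * xxspltw: UIM selects one of the four words of xB, which is then
 * replicated into all four word slots of xT.
 */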
static void gen_xxspltw(DisasContext *ctx)
{
    TCGv_i64 b, b2;
    TCGv_i64 vsr = (UIM(ctx->opcode) & 2) ?
                   cpu_vsrl(xB(ctx->opcode)) :
                   cpu_vsrh(xB(ctx->opcode));

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }

    b = tcg_temp_new_i64();
    b2 = tcg_temp_new_i64();

    if (UIM(ctx->opcode) & 1) {
        tcg_gen_ext32u_i64(b, vsr);
    } else {
        tcg_gen_shri_i64(b, vsr, 32);
    }

    tcg_gen_shli_i64(b2, b, 32);
    tcg_gen_or_i64(cpu_vsrh(xT(ctx->opcode)), b, b2);
    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));

    tcg_temp_free_i64(b);
    tcg_temp_free_i64(b2);
}

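/* Replicate byte x into all eight bytes of a 64-bit value. */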
#define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))

static void gen_xxspltib(DisasContext *ctx)
{
    unsigned char uim8 = IMM8(ctx->opcode);
    /* VSRs 0-31 need VSX; VSRs 32-63 overlay the Altivec registers. */
    if (xT(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    tcg_gen_movi_i64(cpu_vsrh(xT(ctx->opcode)), pattern(uim8));
    tcg_gen_movi_i64(cpu_vsrl(xT(ctx->opcode)), pattern(uim8));
}

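/*
 * xxsldwi: shift the 256-bit concatenation xA:xB left by SHW words and
 * keep the high 128 bits; the four cases below handle the four
 * possible word offsets.
 */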
static void gen_xxsldwi(DisasContext *ctx)
{
    TCGv_i64 xth, xtl;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();

    switch (SHW(ctx->opcode)) {
    case 0: {
        tcg_gen_mov_i64(xth, cpu_vsrh(xA(ctx->opcode)));
        tcg_gen_mov_i64(xtl, cpu_vsrl(xA(ctx->opcode)));
        break;
    }
    case 1: {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_mov_i64(xth, cpu_vsrh(xA(ctx->opcode)));
        tcg_gen_shli_i64(xth, xth, 32);
        tcg_gen_mov_i64(t0, cpu_vsrl(xA(ctx->opcode)));
        tcg_gen_shri_i64(t0, t0, 32);
        tcg_gen_or_i64(xth, xth, t0);
        tcg_gen_mov_i64(xtl, cpu_vsrl(xA(ctx->opcode)));
        tcg_gen_shli_i64(xtl, xtl, 32);
        tcg_gen_mov_i64(t0, cpu_vsrh(xB(ctx->opcode)));
        tcg_gen_shri_i64(t0, t0, 32);
        tcg_gen_or_i64(xtl, xtl, t0);
        tcg_temp_free_i64(t0);
        break;
    }
    case 2: {
        tcg_gen_mov_i64(xth, cpu_vsrl(xA(ctx->opcode)));
        tcg_gen_mov_i64(xtl, cpu_vsrh(xB(ctx->opcode)));
        break;
    }
    case 3: {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_mov_i64(xth, cpu_vsrl(xA(ctx->opcode)));
        tcg_gen_shli_i64(xth, xth, 32);
        tcg_gen_mov_i64(t0, cpu_vsrh(xB(ctx->opcode)));
        tcg_gen_shri_i64(t0, t0, 32);
        tcg_gen_or_i64(xth, xth, t0);
        tcg_gen_mov_i64(xtl, cpu_vsrh(xB(ctx->opcode)));
        tcg_gen_shli_i64(xtl, xtl, 32);
        tcg_gen_mov_i64(t0, cpu_vsrl(xB(ctx->opcode)));
        tcg_gen_shri_i64(t0, t0, 32);
        tcg_gen_or_i64(xtl, xtl, t0);
        tcg_temp_free_i64(t0);
        break;
    }
    }

    tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xth);
    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xtl);

    tcg_temp_free_i64(xth);
    tcg_temp_free_i64(xtl);
}

#undef GEN_XX2FORM
#undef GEN_XX3FORM
#undef GEN_XX2IFORM
#undef GEN_XX3_RC_FORM
#undef GEN_XX3FORM_DM
#undef VSX_LOGICAL