/*** VSX extension ***/
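
/*
 * VSX registers 0-31 overlay the FPRs: cpu_fpr[n] holds the high
 * doubleword and cpu_vsr[n] the low doubleword.  VSX registers 32-63
 * overlay the Altivec registers, split into cpu_avrh[]/cpu_avrl[].
 */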

static inline TCGv_i64 cpu_vsrh(int n)
{
    if (n < 32) {
        return cpu_fpr[n];
    } else {
        return cpu_avrh[n - 32];
    }
}

static inline TCGv_i64 cpu_vsrl(int n)
{
    if (n < 32) {
        return cpu_vsr[n];
    } else {
        return cpu_avrl[n - 32];
    }
}

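/*
 * Scalar VSX loads: check VSX availability, compute EA = (rA|0) + rB,
 * and load into the high doubleword of VSR[XT].
 */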
#define VSX_LOAD_SCALAR(name, operation)                      \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    TCGv EA;                                                  \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                     \
    EA = tcg_temp_new();                                      \
    gen_addr_reg_index(ctx, EA);                              \
    gen_qemu_##operation(ctx, cpu_vsrh(xT(ctx->opcode)), EA); \
    /* NOTE: cpu_vsrl is undefined */                         \
    tcg_temp_free(EA);                                        \
}

VSX_LOAD_SCALAR(lxsdx, ld64_i64)
VSX_LOAD_SCALAR(lxsiwax, ld32s_i64)
VSX_LOAD_SCALAR(lxsibzx, ld8u_i64)
VSX_LOAD_SCALAR(lxsihzx, ld16u_i64)
VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64)
VSX_LOAD_SCALAR(lxsspx, ld32fs)

static void gen_lxvd2x(DisasContext *ctx)
{
    TCGv EA;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
    tcg_gen_addi_tl(EA, EA, 8);
    gen_qemu_ld64_i64(ctx, cpu_vsrl(xT(ctx->opcode)), EA);
    tcg_temp_free(EA);
}

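/* lxvdsx: load one doubleword and splat it into both halves of VSR[XT]. */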
static void gen_lxvdsx(DisasContext *ctx)
{
    TCGv EA;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));
    tcg_temp_free(EA);
}

static void gen_lxvw4x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
    TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();

    gen_addr_reg_index(ctx, EA);
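    /*
     * In little-endian mode each doubleword is fetched LE and its two
     * 32-bit words are swapped back into big-endian element order.
     */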
    if (ctx->le_mode) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
        tcg_gen_shri_i64(t1, t0, 32);
        tcg_gen_deposit_i64(xth, t1, t0, 32, 32);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
        tcg_gen_shri_i64(t1, t0, 32);
        tcg_gen_deposit_i64(xtl, t1, t0, 32, 32);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    } else {
        tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
    }
    tcg_temp_free(EA);
}

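/*
 * Byte-swap each of the eight 16-bit halfwords in a 128-bit value,
 * e.g. inh = 0x0102030405060708 -> outh = 0x0201040306050807.
 */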
static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl,
                          TCGv_i64 inh, TCGv_i64 inl)
{
    TCGv_i64 mask = tcg_const_i64(0x00FF00FF00FF00FF);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* outh = ((inh & mask) << 8) | ((inh >> 8) & mask) */
    tcg_gen_and_i64(t0, inh, mask);
    tcg_gen_shli_i64(t0, t0, 8);
    tcg_gen_shri_i64(t1, inh, 8);
    tcg_gen_and_i64(t1, t1, mask);
    tcg_gen_or_i64(outh, t0, t1);

    /* outl = ((inl & mask) << 8) | ((inl >> 8) & mask) */
    tcg_gen_and_i64(t0, inl, mask);
    tcg_gen_shli_i64(t0, t0, 8);
    tcg_gen_shri_i64(t1, inl, 8);
    tcg_gen_and_i64(t1, t1, mask);
    tcg_gen_or_i64(outl, t0, t1);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(mask);
}

static void gen_lxvh8x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
    TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);

    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
    tcg_gen_addi_tl(EA, EA, 8);
    tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
    if (ctx->le_mode) {
        gen_bswap16x8(xth, xtl, xth, xtl);
    }
    tcg_temp_free(EA);
}

static void gen_lxvb16x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
    TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
    tcg_gen_addi_tl(EA, EA, 8);
    tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
    tcg_temp_free(EA);
}

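/*
 * Scalar VSX stores: the mirror of VSX_LOAD_SCALAR, storing the high
 * doubleword of VSR[XS] to EA = (rA|0) + rB.
 */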
#define VSX_STORE_SCALAR(name, operation)                     \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    TCGv EA;                                                  \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                     \
    EA = tcg_temp_new();                                      \
    gen_addr_reg_index(ctx, EA);                              \
    gen_qemu_##operation(ctx, cpu_vsrh(xS(ctx->opcode)), EA); \
    tcg_temp_free(EA);                                        \
}

VSX_STORE_SCALAR(stxsdx, st64_i64)

VSX_STORE_SCALAR(stxsibx, st8_i64)
VSX_STORE_SCALAR(stxsihx, st16_i64)
VSX_STORE_SCALAR(stxsiwx, st32_i64)
VSX_STORE_SCALAR(stxsspx, st32fs)

static void gen_stxvd2x(DisasContext *ctx)
{
    TCGv EA;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_st64_i64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
    tcg_gen_addi_tl(EA, EA, 8);
    gen_qemu_st64_i64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
    tcg_temp_free(EA);
}

static void gen_stxvw4x(DisasContext *ctx)
{
    TCGv_i64 xsh = cpu_vsrh(xS(ctx->opcode));
    TCGv_i64 xsl = cpu_vsrl(xS(ctx->opcode));
    TCGv EA;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    if (ctx->le_mode) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        tcg_gen_shri_i64(t0, xsh, 32);
        tcg_gen_deposit_i64(t1, t0, xsh, 32, 32);
        tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_shri_i64(t0, xsl, 32);
        tcg_gen_deposit_i64(t1, t0, xsl, 32, 32);
        tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    } else {
        tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
    }
    tcg_temp_free(EA);
}

static void gen_stxvh8x(DisasContext *ctx)
{
    TCGv_i64 xsh = cpu_vsrh(xS(ctx->opcode));
    TCGv_i64 xsl = cpu_vsrl(xS(ctx->opcode));
    TCGv EA;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    if (ctx->le_mode) {
        TCGv_i64 outh = tcg_temp_new_i64();
        TCGv_i64 outl = tcg_temp_new_i64();

        gen_bswap16x8(outh, outl, xsh, xsl);
        tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEQ);
        tcg_temp_free_i64(outh);
        tcg_temp_free_i64(outl);
    } else {
        tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
    }
    tcg_temp_free(EA);
}

static void gen_stxvb16x(DisasContext *ctx)
{
    TCGv_i64 xsh = cpu_vsrh(xS(ctx->opcode));
    TCGv_i64 xsl = cpu_vsrl(xS(ctx->opcode));
    TCGv EA;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
    tcg_gen_addi_tl(EA, EA, 8);
    tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
    tcg_temp_free(EA);
}

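/*
 * Word moves between a GPR and the high doubleword of a VSR.  VSRs 0-31
 * live in the FPRs, so the FP facility must be available; VSRs 32-63
 * live in the Altivec registers, so Altivec must be available.
 */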
#define MV_VSRW(name, tcgop1, tcgop2, target, source)         \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    if (xS(ctx->opcode) < 32) {                               \
        if (unlikely(!ctx->fpu_enabled)) {                    \
            gen_exception(ctx, POWERPC_EXCP_FPU);             \
            return;                                           \
        }                                                     \
    } else {                                                  \
        if (unlikely(!ctx->altivec_enabled)) {                \
            gen_exception(ctx, POWERPC_EXCP_VPU);             \
            return;                                           \
        }                                                     \
    }                                                         \
    TCGv_i64 tmp = tcg_temp_new_i64();                        \
    tcg_gen_##tcgop1(tmp, source);                            \
    tcg_gen_##tcgop2(target, tmp);                            \
    tcg_temp_free_i64(tmp);                                   \
}

MV_VSRW(mfvsrwz, ext32u_i64, trunc_i64_tl, cpu_gpr[rA(ctx->opcode)], \
        cpu_vsrh(xS(ctx->opcode)))
MV_VSRW(mtvsrwa, extu_tl_i64, ext32s_i64, cpu_vsrh(xT(ctx->opcode)), \
        cpu_gpr[rA(ctx->opcode)])
MV_VSRW(mtvsrwz, extu_tl_i64, ext32u_i64, cpu_vsrh(xT(ctx->opcode)), \
        cpu_gpr[rA(ctx->opcode)])

#if defined(TARGET_PPC64)
#define MV_VSRD(name, target, source)                         \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    if (xS(ctx->opcode) < 32) {                               \
        if (unlikely(!ctx->fpu_enabled)) {                    \
            gen_exception(ctx, POWERPC_EXCP_FPU);             \
            return;                                           \
        }                                                     \
    } else {                                                  \
        if (unlikely(!ctx->altivec_enabled)) {                \
            gen_exception(ctx, POWERPC_EXCP_VPU);             \
            return;                                           \
        }                                                     \
    }                                                         \
    tcg_gen_mov_i64(target, source);                          \
}

MV_VSRD(mfvsrd, cpu_gpr[rA(ctx->opcode)], cpu_vsrh(xS(ctx->opcode)))
MV_VSRD(mtvsrd, cpu_vsrh(xT(ctx->opcode)), cpu_gpr[rA(ctx->opcode)])

static void gen_mfvsrld(DisasContext *ctx)
{
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }

    tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], cpu_vsrl(xS(ctx->opcode)));
}

static void gen_mtvsrdd(DisasContext *ctx)
{
    if (xT(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }

    if (!rA(ctx->opcode)) {
        tcg_gen_movi_i64(cpu_vsrh(xT(ctx->opcode)), 0);
    } else {
        tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_gpr[rA(ctx->opcode)]);
    }

    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_gpr[rB(ctx->opcode)]);
}

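/* mtvsrws: splat the low word of GPR[rA] into all four words of VSR[XT]. */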
static void gen_mtvsrws(DisasContext *ctx)
{
    if (xT(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }

    tcg_gen_deposit_i64(cpu_vsrl(xT(ctx->opcode)), cpu_gpr[rA(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], 32, 32);
    tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrl(xT(ctx->opcode)));
}

#endif

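/*
 * xxpermdi: DM bit 1 selects which doubleword of xA goes to the high half
 * of xT, DM bit 0 which doubleword of xB goes to the low half.  Go through
 * temporaries when xT aliases a source, so a half is not clobbered before
 * it is read.
 */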
static void gen_xxpermdi(DisasContext *ctx)
{
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }

    if (unlikely((xT(ctx->opcode) == xA(ctx->opcode)) ||
                 (xT(ctx->opcode) == xB(ctx->opcode)))) {
        TCGv_i64 xh, xl;

        xh = tcg_temp_new_i64();
        xl = tcg_temp_new_i64();

        if ((DM(ctx->opcode) & 2) == 0) {
            tcg_gen_mov_i64(xh, cpu_vsrh(xA(ctx->opcode)));
        } else {
            tcg_gen_mov_i64(xh, cpu_vsrl(xA(ctx->opcode)));
        }
        if ((DM(ctx->opcode) & 1) == 0) {
            tcg_gen_mov_i64(xl, cpu_vsrh(xB(ctx->opcode)));
        } else {
            tcg_gen_mov_i64(xl, cpu_vsrl(xB(ctx->opcode)));
        }

        tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xh);
        tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xl);

        tcg_temp_free_i64(xh);
        tcg_temp_free_i64(xl);
    } else {
        if ((DM(ctx->opcode) & 2) == 0) {
            tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)));
        } else {
            tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)));
        }
        if ((DM(ctx->opcode) & 1) == 0) {
            tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xB(ctx->opcode)));
        } else {
            tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xB(ctx->opcode)));
        }
    }
}

#define OP_ABS 1
#define OP_NABS 2
#define OP_NEG 3
#define OP_CPSGN 4
#define SGN_MASK_DP 0x8000000000000000ull
#define SGN_MASK_SP 0x8000000080000000ull

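/*
 * abs/nabs/neg/cpsgn are pure sign-bit manipulations: clear (andc), set
 * (or), flip (xor), or copy from xA the bits under the sign mask.  The
 * SP mask covers the sign bit of both 32-bit words in each doubleword.
 */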
#define VSX_SCALAR_MOVE(name, op, sgn_mask)                   \
static void glue(gen_, name)(DisasContext *ctx)               \
{                                                             \
    TCGv_i64 xb, sgm;                                         \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    xb = tcg_temp_new_i64();                                  \
    sgm = tcg_temp_new_i64();                                 \
    tcg_gen_mov_i64(xb, cpu_vsrh(xB(ctx->opcode)));           \
    tcg_gen_movi_i64(sgm, sgn_mask);                          \
    switch (op) {                                             \
    case OP_ABS: {                                            \
        tcg_gen_andc_i64(xb, xb, sgm);                        \
        break;                                                \
    }                                                         \
    case OP_NABS: {                                           \
        tcg_gen_or_i64(xb, xb, sgm);                          \
        break;                                                \
    }                                                         \
    case OP_NEG: {                                            \
        tcg_gen_xor_i64(xb, xb, sgm);                         \
        break;                                                \
    }                                                         \
    case OP_CPSGN: {                                          \
        TCGv_i64 xa = tcg_temp_new_i64();                     \
        tcg_gen_mov_i64(xa, cpu_vsrh(xA(ctx->opcode)));       \
        tcg_gen_and_i64(xa, xa, sgm);                         \
        tcg_gen_andc_i64(xb, xb, sgm);                        \
        tcg_gen_or_i64(xb, xb, xa);                           \
        tcg_temp_free_i64(xa);                                \
        break;                                                \
    }                                                         \
    }                                                         \
    tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xb);           \
    tcg_temp_free_i64(xb);                                    \
    tcg_temp_free_i64(sgm);                                   \
}

VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP)
VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP)
VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP)
VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP)

#define VSX_VECTOR_MOVE(name, op, sgn_mask)                   \
static void glue(gen_, name)(DisasContext *ctx)               \
{                                                             \
    TCGv_i64 xbh, xbl, sgm;                                   \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    xbh = tcg_temp_new_i64();                                 \
    xbl = tcg_temp_new_i64();                                 \
    sgm = tcg_temp_new_i64();                                 \
    tcg_gen_mov_i64(xbh, cpu_vsrh(xB(ctx->opcode)));          \
    tcg_gen_mov_i64(xbl, cpu_vsrl(xB(ctx->opcode)));          \
    tcg_gen_movi_i64(sgm, sgn_mask);                          \
    switch (op) {                                             \
    case OP_ABS: {                                            \
        tcg_gen_andc_i64(xbh, xbh, sgm);                      \
        tcg_gen_andc_i64(xbl, xbl, sgm);                      \
        break;                                                \
    }                                                         \
    case OP_NABS: {                                           \
        tcg_gen_or_i64(xbh, xbh, sgm);                        \
        tcg_gen_or_i64(xbl, xbl, sgm);                        \
        break;                                                \
    }                                                         \
    case OP_NEG: {                                            \
        tcg_gen_xor_i64(xbh, xbh, sgm);                       \
        tcg_gen_xor_i64(xbl, xbl, sgm);                       \
        break;                                                \
    }                                                         \
    case OP_CPSGN: {                                          \
        TCGv_i64 xah = tcg_temp_new_i64();                    \
        TCGv_i64 xal = tcg_temp_new_i64();                    \
        tcg_gen_mov_i64(xah, cpu_vsrh(xA(ctx->opcode)));      \
        tcg_gen_mov_i64(xal, cpu_vsrl(xA(ctx->opcode)));      \
        tcg_gen_and_i64(xah, xah, sgm);                       \
        tcg_gen_and_i64(xal, xal, sgm);                       \
        tcg_gen_andc_i64(xbh, xbh, sgm);                      \
        tcg_gen_andc_i64(xbl, xbl, sgm);                      \
        tcg_gen_or_i64(xbh, xbh, xah);                        \
        tcg_gen_or_i64(xbl, xbl, xal);                        \
        tcg_temp_free_i64(xah);                               \
        tcg_temp_free_i64(xal);                               \
        break;                                                \
    }                                                         \
    }                                                         \
    tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xbh);          \
    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xbl);          \
    tcg_temp_free_i64(xbh);                                   \
    tcg_temp_free_i64(xbl);                                   \
    tcg_temp_free_i64(sgm);                                   \
}

VSX_VECTOR_MOVE(xvabsdp, OP_ABS, SGN_MASK_DP)
VSX_VECTOR_MOVE(xvnabsdp, OP_NABS, SGN_MASK_DP)
VSX_VECTOR_MOVE(xvnegdp, OP_NEG, SGN_MASK_DP)
VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP)
VSX_VECTOR_MOVE(xvabssp, OP_ABS, SGN_MASK_SP)
VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP)
VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP)
VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP)

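/*
 * Most VSX arithmetic is implemented out of line: these stubs only check
 * VSX availability and call the C helper, passing the raw opcode so the
 * helper can decode the register fields itself.
 */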
#define GEN_VSX_HELPER_2(name, op1, op2, inval, type)         \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    TCGv_i32 opc;                                             \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    opc = tcg_const_i32(ctx->opcode);                         \
    gen_helper_##name(cpu_env, opc);                          \
    tcg_temp_free_i32(opc);                                   \
}

#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    gen_helper_##name(cpu_vsrh(xT(ctx->opcode)), cpu_env,     \
                      cpu_vsrh(xB(ctx->opcode)));             \
}

GEN_VSX_HELPER_2(xsadddp, 0x00, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xssubdp, 0x00, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmuldp, 0x00, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsdivdp, 0x00, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsredp, 0x14, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmaddadp, 0x04, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmaddmdp, 0x04, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmsubadp, 0x04, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmsubmdp, 0x04, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsnmaddadp, 0x04, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsnmaddmdp, 0x04, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsnmsubadp, 0x04, 0x16, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsnmsubmdp, 0x04, 0x17, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xscvspdpn, 0x16, 0x14, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207)

GEN_VSX_HELPER_2(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xssubsp, 0x00, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsresp, 0x14, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmaddasp, 0x04, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmaddmsp, 0x04, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmsubasp, 0x04, 0x02, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmsubmsp, 0x04, 0x03, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsnmaddasp, 0x04, 0x10, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsnmaddmsp, 0x04, 0x11, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsnmsubasp, 0x04, 0x12, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsnmsubmsp, 0x04, 0x13, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)

GEN_VSX_HELPER_2(xvadddp, 0x00, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvredp, 0x14, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaddadp, 0x04, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaddmdp, 0x04, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmsubadp, 0x04, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmsubmdp, 0x04, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmaddadp, 0x04, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmaddmdp, 0x04, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmsubadp, 0x04, 0x1E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmsubmdp, 0x04, 0x1F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmindp, 0x00, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX)

GEN_VSX_HELPER_2(xvaddsp, 0x00, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvsubsp, 0x00, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvresp, 0x14, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaddasp, 0x04, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaddmsp, 0x04, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmsubasp, 0x04, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmsubmsp, 0x04, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmaddasp, 0x04, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmaddmsp, 0x04, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmsubasp, 0x04, 0x1A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmsubmsp, 0x04, 0x1B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvminsp, 0x00, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspi, 0x12, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)

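/*
 * 128-bit logical operations decompose into two independent 64-bit TCG
 * ops, one per doubleword, so no temporaries are needed even when xT
 * overlaps a source.
 */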
#define VSX_LOGICAL(name, tcg_op)                                \
static void glue(gen_, name)(DisasContext *ctx)                  \
{                                                                \
    if (unlikely(!ctx->vsx_enabled)) {                           \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                   \
        return;                                                  \
    }                                                            \
    tcg_op(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)), \
           cpu_vsrh(xB(ctx->opcode)));                           \
    tcg_op(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)), \
           cpu_vsrl(xB(ctx->opcode)));                           \
}

VSX_LOGICAL(xxland, tcg_gen_and_i64)
VSX_LOGICAL(xxlandc, tcg_gen_andc_i64)
VSX_LOGICAL(xxlor, tcg_gen_or_i64)
VSX_LOGICAL(xxlxor, tcg_gen_xor_i64)
VSX_LOGICAL(xxlnor, tcg_gen_nor_i64)
VSX_LOGICAL(xxleqv, tcg_gen_eqv_i64)
VSX_LOGICAL(xxlnand, tcg_gen_nand_i64)
VSX_LOGICAL(xxlorc, tcg_gen_orc_i64)

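/*
 * xxmrghw/xxmrglw interleave the words of one half of xA and xB: with
 * A = {a0,a1,a2,a3} and B = {b0,b1,b2,b3}, xxmrghw yields {a0,b0,a1,b1}
 * and xxmrglw yields {a2,b2,a3,b3}.
 */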
#define VSX_XXMRG(name, high)                                 \
static void glue(gen_, name)(DisasContext *ctx)               \
{                                                             \
    TCGv_i64 a0, a1, b0, b1;                                  \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    a0 = tcg_temp_new_i64();                                  \
    a1 = tcg_temp_new_i64();                                  \
    b0 = tcg_temp_new_i64();                                  \
    b1 = tcg_temp_new_i64();                                  \
    if (high) {                                               \
        tcg_gen_mov_i64(a0, cpu_vsrh(xA(ctx->opcode)));       \
        tcg_gen_mov_i64(a1, cpu_vsrh(xA(ctx->opcode)));       \
        tcg_gen_mov_i64(b0, cpu_vsrh(xB(ctx->opcode)));       \
        tcg_gen_mov_i64(b1, cpu_vsrh(xB(ctx->opcode)));       \
    } else {                                                  \
        tcg_gen_mov_i64(a0, cpu_vsrl(xA(ctx->opcode)));       \
        tcg_gen_mov_i64(a1, cpu_vsrl(xA(ctx->opcode)));       \
        tcg_gen_mov_i64(b0, cpu_vsrl(xB(ctx->opcode)));       \
        tcg_gen_mov_i64(b1, cpu_vsrl(xB(ctx->opcode)));       \
    }                                                         \
    tcg_gen_shri_i64(a0, a0, 32);                             \
    tcg_gen_shri_i64(b0, b0, 32);                             \
    tcg_gen_deposit_i64(cpu_vsrh(xT(ctx->opcode)),            \
                        b0, a0, 32, 32);                      \
    tcg_gen_deposit_i64(cpu_vsrl(xT(ctx->opcode)),            \
                        b1, a1, 32, 32);                      \
    tcg_temp_free_i64(a0);                                    \
    tcg_temp_free_i64(a1);                                    \
    tcg_temp_free_i64(b0);                                    \
    tcg_temp_free_i64(b1);                                    \
}

VSX_XXMRG(xxmrghw, 1)
VSX_XXMRG(xxmrglw, 0)

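/* xxsel: bitwise select, xT = (xA & ~xC) | (xB & xC). */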
static void gen_xxsel(DisasContext *ctx)
{
    TCGv_i64 a, b, c;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    a = tcg_temp_new_i64();
    b = tcg_temp_new_i64();
    c = tcg_temp_new_i64();

    tcg_gen_mov_i64(a, cpu_vsrh(xA(ctx->opcode)));
    tcg_gen_mov_i64(b, cpu_vsrh(xB(ctx->opcode)));
    tcg_gen_mov_i64(c, cpu_vsrh(xC(ctx->opcode)));

    tcg_gen_and_i64(b, b, c);
    tcg_gen_andc_i64(a, a, c);
    tcg_gen_or_i64(cpu_vsrh(xT(ctx->opcode)), a, b);

    tcg_gen_mov_i64(a, cpu_vsrl(xA(ctx->opcode)));
    tcg_gen_mov_i64(b, cpu_vsrl(xB(ctx->opcode)));
    tcg_gen_mov_i64(c, cpu_vsrl(xC(ctx->opcode)));

    tcg_gen_and_i64(b, b, c);
    tcg_gen_andc_i64(a, a, c);
    tcg_gen_or_i64(cpu_vsrl(xT(ctx->opcode)), a, b);

    tcg_temp_free_i64(a);
    tcg_temp_free_i64(b);
    tcg_temp_free_i64(c);
}

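/*
 * xxspltw: replicate word UIM (0-3, big-endian order) of xB into all
 * four words of xT.
 */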
static void gen_xxspltw(DisasContext *ctx)
{
    TCGv_i64 b, b2;
    TCGv_i64 vsr = (UIM(ctx->opcode) & 2) ?
                   cpu_vsrl(xB(ctx->opcode)) :
                   cpu_vsrh(xB(ctx->opcode));

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }

    b = tcg_temp_new_i64();
    b2 = tcg_temp_new_i64();

    if (UIM(ctx->opcode) & 1) {
        tcg_gen_ext32u_i64(b, vsr);
    } else {
        tcg_gen_shri_i64(b, vsr, 32);
    }

    tcg_gen_shli_i64(b2, b, 32);
    tcg_gen_or_i64(cpu_vsrh(xT(ctx->opcode)), b, b2);
    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));

    tcg_temp_free_i64(b);
    tcg_temp_free_i64(b2);
}

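/*
 * Replicate the low byte of x across all eight bytes of a uint64_t:
 * ~(uint64_t)0 / 0xff is 0x0101010101010101, so e.g. pattern(0xab)
 * yields 0xabababababababab.
 */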
#define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))

static void gen_xxspltib(DisasContext *ctx)
{
    unsigned char uim8 = IMM8(ctx->opcode);
    /*
     * The target register selects the facility check: VSRs 0-31 need
     * VSX, VSRs 32-63 (the Altivec registers) need Altivec.  The
     * original had these two checks swapped and tested xS rather than
     * the target xT.
     */
    if (xT(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    tcg_gen_movi_i64(cpu_vsrh(xT(ctx->opcode)), pattern(uim8));
    tcg_gen_movi_i64(cpu_vsrl(xT(ctx->opcode)), pattern(uim8));
}

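/*
 * xxsldwi: treat xA:xB as a 256-bit value and extract the 128 bits that
 * start SHW (0-3) words in from the left.
 */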
static void gen_xxsldwi(DisasContext *ctx)
{
    TCGv_i64 xth, xtl;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();

    switch (SHW(ctx->opcode)) {
    case 0: {
        tcg_gen_mov_i64(xth, cpu_vsrh(xA(ctx->opcode)));
        tcg_gen_mov_i64(xtl, cpu_vsrl(xA(ctx->opcode)));
        break;
    }
    case 1: {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_mov_i64(xth, cpu_vsrh(xA(ctx->opcode)));
        tcg_gen_shli_i64(xth, xth, 32);
        tcg_gen_mov_i64(t0, cpu_vsrl(xA(ctx->opcode)));
        tcg_gen_shri_i64(t0, t0, 32);
        tcg_gen_or_i64(xth, xth, t0);
        tcg_gen_mov_i64(xtl, cpu_vsrl(xA(ctx->opcode)));
        tcg_gen_shli_i64(xtl, xtl, 32);
        tcg_gen_mov_i64(t0, cpu_vsrh(xB(ctx->opcode)));
        tcg_gen_shri_i64(t0, t0, 32);
        tcg_gen_or_i64(xtl, xtl, t0);
        tcg_temp_free_i64(t0);
        break;
    }
    case 2: {
        tcg_gen_mov_i64(xth, cpu_vsrl(xA(ctx->opcode)));
        tcg_gen_mov_i64(xtl, cpu_vsrh(xB(ctx->opcode)));
        break;
    }
    case 3: {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_mov_i64(xth, cpu_vsrl(xA(ctx->opcode)));
        tcg_gen_shli_i64(xth, xth, 32);
        tcg_gen_mov_i64(t0, cpu_vsrh(xB(ctx->opcode)));
        tcg_gen_shri_i64(t0, t0, 32);
        tcg_gen_or_i64(xth, xth, t0);
        tcg_gen_mov_i64(xtl, cpu_vsrh(xB(ctx->opcode)));
        tcg_gen_shli_i64(xtl, xtl, 32);
        tcg_gen_mov_i64(t0, cpu_vsrl(xB(ctx->opcode)));
        tcg_gen_shri_i64(t0, t0, 32);
        tcg_gen_or_i64(xtl, xtl, t0);
        tcg_temp_free_i64(t0);
        break;
    }
    }

    tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xth);
    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xtl);

    tcg_temp_free_i64(xth);
    tcg_temp_free_i64(xtl);
}

#undef GEN_XX2FORM
#undef GEN_XX3FORM
#undef GEN_XX2IFORM
#undef GEN_XX3_RC_FORM
#undef GEN_XX3FORM_DM
#undef VSX_LOGICAL