/*
 * translate/vmx-impl.c
 *
 * Altivec/VMX translation
 */

/*** Altivec vector extension ***/
/* Altivec register moves */

static inline TCGv_ptr gen_avr_ptr(int reg)
{
    TCGv_ptr r = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(r, cpu_env, avr_full_offset(reg));
    return r;
}

#define GEN_VR_LDX(name, opc2, opc3)                                    \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv EA;                                                            \
    TCGv_i64 avr;                                                       \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    gen_set_access_type(ctx, ACCESS_INT);                               \
    avr = tcg_temp_new_i64();                                           \
    EA = tcg_temp_new();                                                \
    gen_addr_reg_index(ctx, EA);                                        \
    tcg_gen_andi_tl(EA, EA, ~0xf);                                      \
    /*                                                                  \
     * We only need to swap high and low halves; gen_qemu_ld64_i64      \
     * already does the necessary 64-bit byteswap.                      \
     */                                                                 \
    if (ctx->le_mode) {                                                 \
        gen_qemu_ld64_i64(ctx, avr, EA);                                \
        set_avr64(rD(ctx->opcode), avr, false);                         \
        tcg_gen_addi_tl(EA, EA, 8);                                     \
        gen_qemu_ld64_i64(ctx, avr, EA);                                \
        set_avr64(rD(ctx->opcode), avr, true);                          \
    } else {                                                            \
        gen_qemu_ld64_i64(ctx, avr, EA);                                \
        set_avr64(rD(ctx->opcode), avr, true);                          \
        tcg_gen_addi_tl(EA, EA, 8);                                     \
        gen_qemu_ld64_i64(ctx, avr, EA);                                \
        set_avr64(rD(ctx->opcode), avr, false);                         \
    }                                                                   \
}
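
/*
 * For example, GEN_VR_LDX(lvx, 0x07, 0x03) below expands to gen_lvx(), which
 * loads the 16-byte quantity at EA & ~0xf into vD as two 64-bit halves; only
 * the placement of the two halves depends on the endianness, since the
 * 64-bit loads themselves already byteswap as needed.
 */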

#define GEN_VR_STX(name, opc2, opc3)                                    \
static void gen_st##name(DisasContext *ctx)                             \
{                                                                       \
    TCGv EA;                                                            \
    TCGv_i64 avr;                                                       \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    gen_set_access_type(ctx, ACCESS_INT);                               \
    avr = tcg_temp_new_i64();                                           \
    EA = tcg_temp_new();                                                \
    gen_addr_reg_index(ctx, EA);                                        \
    tcg_gen_andi_tl(EA, EA, ~0xf);                                      \
    /*                                                                  \
     * We only need to swap high and low halves; gen_qemu_st64_i64      \
     * already does the necessary 64-bit byteswap.                      \
     */                                                                 \
    if (ctx->le_mode) {                                                 \
        get_avr64(avr, rD(ctx->opcode), false);                         \
        gen_qemu_st64_i64(ctx, avr, EA);                                \
        tcg_gen_addi_tl(EA, EA, 8);                                     \
        get_avr64(avr, rD(ctx->opcode), true);                          \
        gen_qemu_st64_i64(ctx, avr, EA);                                \
    } else {                                                            \
        get_avr64(avr, rD(ctx->opcode), true);                          \
        gen_qemu_st64_i64(ctx, avr, EA);                                \
        tcg_gen_addi_tl(EA, EA, 8);                                     \
        get_avr64(avr, rD(ctx->opcode), false);                         \
        gen_qemu_st64_i64(ctx, avr, EA);                                \
    }                                                                   \
}

#define GEN_VR_LVE(name, opc2, opc3, size)                              \
static void gen_lve##name(DisasContext *ctx)                            \
{                                                                       \
    TCGv EA;                                                            \
    TCGv_ptr rs;                                                        \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    gen_set_access_type(ctx, ACCESS_INT);                               \
    EA = tcg_temp_new();                                                \
    gen_addr_reg_index(ctx, EA);                                        \
    if (size > 1) {                                                     \
        tcg_gen_andi_tl(EA, EA, ~(size - 1));                           \
    }                                                                   \
    rs = gen_avr_ptr(rS(ctx->opcode));                                  \
    gen_helper_lve##name(cpu_env, rs, EA);                              \
}

#define GEN_VR_STVE(name, opc2, opc3, size)                             \
static void gen_stve##name(DisasContext *ctx)                           \
{                                                                       \
    TCGv EA;                                                            \
    TCGv_ptr rs;                                                        \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    gen_set_access_type(ctx, ACCESS_INT);                               \
    EA = tcg_temp_new();                                                \
    gen_addr_reg_index(ctx, EA);                                        \
    if (size > 1) {                                                     \
        tcg_gen_andi_tl(EA, EA, ~(size - 1));                           \
    }                                                                   \
    rs = gen_avr_ptr(rS(ctx->opcode));                                  \
    gen_helper_stve##name(cpu_env, rs, EA);                             \
}

GEN_VR_LDX(lvx, 0x07, 0x03);
/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
GEN_VR_LDX(lvxl, 0x07, 0x0B);

GEN_VR_LVE(bx, 0x07, 0x00, 1);
GEN_VR_LVE(hx, 0x07, 0x01, 2);
GEN_VR_LVE(wx, 0x07, 0x02, 4);

GEN_VR_STX(svx, 0x07, 0x07);
/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
GEN_VR_STX(svxl, 0x07, 0x0F);

GEN_VR_STVE(bx, 0x07, 0x04, 1);
GEN_VR_STVE(hx, 0x07, 0x05, 2);
GEN_VR_STVE(wx, 0x07, 0x06, 4);

static void gen_mfvscr(DisasContext *ctx)
{
    TCGv_i32 t;
    TCGv_i64 avr;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    avr = tcg_temp_new_i64();
    tcg_gen_movi_i64(avr, 0);
    set_avr64(rD(ctx->opcode), avr, true);
    t = tcg_temp_new_i32();
    gen_helper_mfvscr(t, cpu_env);
    tcg_gen_extu_i32_i64(avr, t);
    set_avr64(rD(ctx->opcode), avr, false);
}

static void gen_mtvscr(DisasContext *ctx)
{
    TCGv_i32 val;
    int bofs;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    val = tcg_temp_new_i32();
    bofs = avr_full_offset(rB(ctx->opcode));
#if HOST_BIG_ENDIAN
    bofs += 3 * 4;
#endif

    tcg_gen_ld_i32(val, cpu_env, bofs);
    gen_helper_mtvscr(cpu_env, val);
}

static void gen_vx_vmul10(DisasContext *ctx, bool add_cin, bool ret_carry)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    TCGv_i64 t2;
    TCGv_i64 avr;
    TCGv_i64 ten, z;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    avr = tcg_temp_new_i64();
    ten = tcg_constant_i64(10);
    z = tcg_constant_i64(0);

    if (add_cin) {
        get_avr64(avr, rA(ctx->opcode), false);
        tcg_gen_mulu2_i64(t0, t1, avr, ten);
        get_avr64(avr, rB(ctx->opcode), false);
        tcg_gen_andi_i64(t2, avr, 0xF);
        tcg_gen_add2_i64(avr, t2, t0, t1, t2, z);
        set_avr64(rD(ctx->opcode), avr, false);
    } else {
        get_avr64(avr, rA(ctx->opcode), false);
        tcg_gen_mulu2_i64(avr, t2, avr, ten);
        set_avr64(rD(ctx->opcode), avr, false);
    }

    if (ret_carry) {
        get_avr64(avr, rA(ctx->opcode), true);
        tcg_gen_mulu2_i64(t0, t1, avr, ten);
        tcg_gen_add2_i64(t0, avr, t0, t1, t2, z);
        set_avr64(rD(ctx->opcode), avr, false);
        set_avr64(rD(ctx->opcode), z, true);
    } else {
        get_avr64(avr, rA(ctx->opcode), true);
        tcg_gen_mul_i64(t0, avr, ten);
        tcg_gen_add_i64(avr, t0, t2);
        set_avr64(rD(ctx->opcode), avr, true);
    }
}
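
/*
 * With vA taken as the 128-bit value H:L (two 64-bit halves), vA * 10 is
 * computed as (H * 10 + carry(L * 10)) : (L * 10); t2 carries the high half
 * of the low product into the high doubleword. The 'e' forms additionally
 * add the low nibble of vB as a carry-in, and the 'c' forms return the
 * final carry out of the 128-bit product instead of the product itself.
 */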

#define GEN_VX_VMUL10(name, add_cin, ret_carry)                         \
static void glue(gen_, name)(DisasContext *ctx)                         \
{ gen_vx_vmul10(ctx, add_cin, ret_carry); }

GEN_VX_VMUL10(vmul10uq, 0, 0);
GEN_VX_VMUL10(vmul10euq, 1, 0);
GEN_VX_VMUL10(vmul10cuq, 0, 1);
GEN_VX_VMUL10(vmul10ecuq, 1, 1);

#define GEN_VXFORM_V(name, vece, tcg_op, opc2, opc3)                    \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
                                                                        \
    tcg_op(vece,                                                        \
           avr_full_offset(rD(ctx->opcode)),                            \
           avr_full_offset(rA(ctx->opcode)),                            \
           avr_full_offset(rB(ctx->opcode)),                            \
           16, 16);                                                     \
}

/* Logical operations */
GEN_VXFORM_V(vand, MO_64, tcg_gen_gvec_and, 2, 16);
GEN_VXFORM_V(vandc, MO_64, tcg_gen_gvec_andc, 2, 17);
GEN_VXFORM_V(vor, MO_64, tcg_gen_gvec_or, 2, 18);
GEN_VXFORM_V(vxor, MO_64, tcg_gen_gvec_xor, 2, 19);
GEN_VXFORM_V(vnor, MO_64, tcg_gen_gvec_nor, 2, 20);
GEN_VXFORM_V(veqv, MO_64, tcg_gen_gvec_eqv, 2, 26);
GEN_VXFORM_V(vnand, MO_64, tcg_gen_gvec_nand, 2, 22);
GEN_VXFORM_V(vorc, MO_64, tcg_gen_gvec_orc, 2, 21);

#define GEN_VXFORM(name, opc2, opc3)                                    \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr ra, rb, rd;                                                \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(rd, ra, rb);                                      \
}

#define GEN_VXFORM_TRANS(name, opc2, opc3)                              \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    trans_##name(ctx);                                                  \
}

#define GEN_VXFORM_ENV(name, opc2, opc3)                                \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr ra, rb, rd;                                                \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(cpu_env, rd, ra, rb);                             \
}

#define GEN_VXFORM3(name, opc2, opc3)                                   \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr ra, rb, rc, rd;                                            \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rc = gen_avr_ptr(rC(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(rd, ra, rb, rc);                                  \
}

/*
 * Support for Altivec instruction pairs that use bit 31 (Rc) as
 * an opcode bit. In general, these pairs come from different
 * versions of the ISA, so we must also support a pair of flags for
 * each instruction.
 */
#define GEN_VXFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)       \
static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
{                                                                       \
    if ((Rc(ctx->opcode) == 0) &&                                       \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) {  \
        gen_##name0(ctx);                                               \
    } else if ((Rc(ctx->opcode) == 1) &&                                \
        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) {  \
        gen_##name1(ctx);                                               \
    } else {                                                            \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);             \
    }                                                                   \
}

/*
 * We use this macro if one instruction is realized with direct
 * translation and the second one with a helper.
 */
#define GEN_VXFORM_TRANS_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \
static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
{                                                                       \
    if ((Rc(ctx->opcode) == 0) &&                                       \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) {  \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        trans_##name0(ctx);                                             \
    } else if ((Rc(ctx->opcode) == 1) &&                                \
        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) {  \
        gen_##name1(ctx);                                               \
    } else {                                                            \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);             \
    }                                                                   \
}

/* Like GEN_VXFORM_DUAL, but with an additional invalid-bits mask per insn */
#define GEN_VXFORM_DUAL_EXT(name0, flg0, flg2_0, inval0,                \
                            name1, flg1, flg2_1, inval1)                \
static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
{                                                                       \
    if ((Rc(ctx->opcode) == 0) &&                                       \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0)) &&  \
        !(ctx->opcode & inval0)) {                                      \
        gen_##name0(ctx);                                               \
    } else if ((Rc(ctx->opcode) == 1) &&                                \
        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1)) &&  \
        !(ctx->opcode & inval1)) {                                      \
        gen_##name1(ctx);                                               \
    } else {                                                            \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);             \
    }                                                                   \
}
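
/*
 * For example, vaddubm (Rc=0) and vmul10cuq (Rc=1) below share the same
 * opcode space; the inval mask 0x0000F800 covers the vB field, which must
 * be zero in the vmul10cuq encoding.
 */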

#define GEN_VXFORM_HETRO(name, opc2, opc3)                              \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr rb;                                                        \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    gen_helper_##name(cpu_gpr[rD(ctx->opcode)],                         \
                      cpu_gpr[rA(ctx->opcode)], rb);                    \
}

GEN_VXFORM_V(vaddubm, MO_8, tcg_gen_gvec_add, 0, 0);
GEN_VXFORM_DUAL_EXT(vaddubm, PPC_ALTIVEC, PPC_NONE, 0,                  \
                    vmul10cuq, PPC_NONE, PPC2_ISA300, 0x0000F800)
GEN_VXFORM_V(vadduhm, MO_16, tcg_gen_gvec_add, 0, 1);
GEN_VXFORM_DUAL(vadduhm, PPC_ALTIVEC, PPC_NONE,                         \
                vmul10ecuq, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_V(vadduwm, MO_32, tcg_gen_gvec_add, 0, 2);
GEN_VXFORM_V(vaddudm, MO_64, tcg_gen_gvec_add, 0, 3);
GEN_VXFORM_V(vsububm, MO_8, tcg_gen_gvec_sub, 0, 16);
GEN_VXFORM_V(vsubuhm, MO_16, tcg_gen_gvec_sub, 0, 17);
GEN_VXFORM_V(vsubuwm, MO_32, tcg_gen_gvec_sub, 0, 18);
GEN_VXFORM_V(vsubudm, MO_64, tcg_gen_gvec_sub, 0, 19);
GEN_VXFORM_V(vmaxub, MO_8, tcg_gen_gvec_umax, 1, 0);
GEN_VXFORM_V(vmaxuh, MO_16, tcg_gen_gvec_umax, 1, 1);
GEN_VXFORM_V(vmaxuw, MO_32, tcg_gen_gvec_umax, 1, 2);
GEN_VXFORM_V(vmaxud, MO_64, tcg_gen_gvec_umax, 1, 3);
GEN_VXFORM_V(vmaxsb, MO_8, tcg_gen_gvec_smax, 1, 4);
GEN_VXFORM_V(vmaxsh, MO_16, tcg_gen_gvec_smax, 1, 5);
GEN_VXFORM_V(vmaxsw, MO_32, tcg_gen_gvec_smax, 1, 6);
GEN_VXFORM_V(vmaxsd, MO_64, tcg_gen_gvec_smax, 1, 7);
GEN_VXFORM_V(vminub, MO_8, tcg_gen_gvec_umin, 1, 8);
GEN_VXFORM_V(vminuh, MO_16, tcg_gen_gvec_umin, 1, 9);
GEN_VXFORM_V(vminuw, MO_32, tcg_gen_gvec_umin, 1, 10);
GEN_VXFORM_V(vminud, MO_64, tcg_gen_gvec_umin, 1, 11);
GEN_VXFORM_V(vminsb, MO_8, tcg_gen_gvec_smin, 1, 12);
GEN_VXFORM_V(vminsh, MO_16, tcg_gen_gvec_smin, 1, 13);
GEN_VXFORM_V(vminsw, MO_32, tcg_gen_gvec_smin, 1, 14);
GEN_VXFORM_V(vminsd, MO_64, tcg_gen_gvec_smin, 1, 15);
GEN_VXFORM(vmrghb, 6, 0);
GEN_VXFORM(vmrghh, 6, 1);
GEN_VXFORM(vmrghw, 6, 2);
GEN_VXFORM(vmrglb, 6, 4);
GEN_VXFORM(vmrglh, 6, 5);
GEN_VXFORM(vmrglw, 6, 6);

static void trans_vmrgew(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 avr = tcg_temp_new_i64();

    get_avr64(avr, VB, true);
    tcg_gen_shri_i64(tmp, avr, 32);
    get_avr64(avr, VA, true);
    tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
    set_avr64(VT, avr, true);

    get_avr64(avr, VB, false);
    tcg_gen_shri_i64(tmp, avr, 32);
    get_avr64(avr, VA, false);
    tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
    set_avr64(VT, avr, false);
}

static void trans_vmrgow(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 avr = tcg_temp_new_i64();

    get_avr64(t0, VB, true);
    get_avr64(t1, VA, true);
    tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
    set_avr64(VT, avr, true);

    get_avr64(t0, VB, false);
    get_avr64(t1, VA, false);
    tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
    set_avr64(VT, avr, false);
}
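
/*
 * For example, with word elements vA = {a0, a1, a2, a3} and
 * vB = {b0, b1, b2, b3} (element 0 most significant), vmrgew produces
 * {a0, b0, a2, b2} and vmrgow produces {a1, b1, a3, b3}.
 */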

/*
 * lvsl VRT,RA,RB - Load Vector for Shift Left
 *
 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28–31].
 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
 * Bytes sh:sh+15 of X are placed into vD.
 */
static void trans_lvsl(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    TCGv_i64 result = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv EA = tcg_temp_new();

    /* Get sh (from the description above) by ANDing EA with 0xf. */
    gen_addr_reg_index(ctx, EA);
    tcg_gen_extu_tl_i64(sh, EA);
    tcg_gen_andi_i64(sh, sh, 0xfULL);

    /*
     * Create bytes sh:sh+7 of X (from the description above) and place them
     * in the higher doubleword of vD.
     */
    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
    tcg_gen_addi_i64(result, sh, 0x0001020304050607ULL);
    set_avr64(VT, result, true);
    /*
     * Create bytes sh+8:sh+15 of X (from the description above) and place
     * them in the lower doubleword of vD.
     */
    tcg_gen_addi_i64(result, sh, 0x08090a0b0c0d0e0fULL);
    set_avr64(VT, result, false);
}
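
/*
 * For example, sh = 3 gives vD = 0x030405060708090a 0x0b0c0d0e0f101112,
 * i.e. bytes 3:18 of X.
 */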

/*
 * lvsr VRT,RA,RB - Load Vector for Shift Right
 *
 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28–31].
 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
 * Bytes (16-sh):(31-sh) of X are placed into vD.
 */
static void trans_lvsr(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    TCGv_i64 result = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv EA = tcg_temp_new();

    /* Get sh (from the description above) by ANDing EA with 0xf. */
    gen_addr_reg_index(ctx, EA);
    tcg_gen_extu_tl_i64(sh, EA);
    tcg_gen_andi_i64(sh, sh, 0xfULL);

    /*
     * Create bytes (16-sh):(23-sh) of X (from the description above) and
     * place them in the higher doubleword of vD.
     */
    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
    tcg_gen_subfi_i64(result, 0x1011121314151617ULL, sh);
    set_avr64(VT, result, true);
    /*
     * Create bytes (24-sh):(31-sh) of X (from the description above) and
     * place them in the lower doubleword of vD.
     */
    tcg_gen_subfi_i64(result, 0x18191a1b1c1d1e1fULL, sh);
    set_avr64(VT, result, false);
}
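
/*
 * For example, sh = 3 gives vD = 0x0d0e0f1011121314 0x15161718191a1b1c,
 * i.e. bytes 13:28 of X.
 */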

/*
 * vsl VRT,VRA,VRB - Vector Shift Left
 *
 * Shift the 128-bit value of vA left by the amount specified in bits
 * 125-127 of vB. The lowest 3 bits of each byte element of register vB
 * must be identical, otherwise the result is undefined.
 */
static void trans_vsl(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv_i64 carry = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Place bits 125-127 of vB in 'sh'. */
    get_avr64(avr, VB, false);
    tcg_gen_andi_i64(sh, avr, 0x07ULL);

    /*
     * Save the highest 'sh' bits of the lower doubleword element of vA in
     * 'carry' and perform the shift on the lower doubleword.
     */
    get_avr64(avr, VA, false);
    tcg_gen_subfi_i64(tmp, 32, sh);
    tcg_gen_shri_i64(carry, avr, 32);
    tcg_gen_shr_i64(carry, carry, tmp);
    tcg_gen_shl_i64(avr, avr, sh);
    set_avr64(VT, avr, false);

    /*
     * Perform the shift on the higher doubleword element of vA and replace
     * the lowest 'sh' bits with 'carry'.
     */
    get_avr64(avr, VA, true);
    tcg_gen_shl_i64(avr, avr, sh);
    tcg_gen_or_i64(avr, avr, carry);
    set_avr64(VT, avr, true);
}
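
/*
 * Note that the carry is extracted with two shifts, by 32 and then by
 * 32 - sh, because a single right shift by 64 - sh would be an undefined
 * 64-bit TCG shift when sh = 0.
 */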

/*
 * vsr VRT,VRA,VRB - Vector Shift Right
 *
 * Shift the 128-bit value of vA right by the amount specified in bits
 * 125-127 of vB. The lowest 3 bits of each byte element of register vB
 * must be identical, otherwise the result is undefined.
 */
static void trans_vsr(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv_i64 carry = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Place bits 125-127 of vB in 'sh'. */
    get_avr64(avr, VB, false);
    tcg_gen_andi_i64(sh, avr, 0x07ULL);

    /*
     * Save the lowest 'sh' bits of the higher doubleword element of vA in
     * 'carry' and perform the shift on the higher doubleword.
     */
    get_avr64(avr, VA, true);
    tcg_gen_subfi_i64(tmp, 32, sh);
    tcg_gen_shli_i64(carry, avr, 32);
    tcg_gen_shl_i64(carry, carry, tmp);
    tcg_gen_shr_i64(avr, avr, sh);
    set_avr64(VT, avr, true);
    /*
     * Perform the shift on the lower doubleword element of vA and replace
     * the highest 'sh' bits with 'carry'.
     */
    get_avr64(avr, VA, false);
    tcg_gen_shr_i64(avr, avr, sh);
    tcg_gen_or_i64(avr, avr, carry);
    set_avr64(VT, avr, false);
}

/*
 * vgbbd VRT,VRB - Vector Gather Bits by Bytes by Doubleword
 *
 * All i-th bits (i in range 1 to 8) of each byte of each doubleword element
 * in the source register are concatenated and placed into the i-th byte of
 * the corresponding doubleword element in the destination register.
 *
 * The following solution processes both doubleword elements of the source
 * register in parallel, to reduce the number of instructions needed (that
 * is why arrays are used):
 * First, both doubleword elements of source register vB are placed in the
 * corresponding elements of array avr. Bits are gathered in 2x8 iterations
 * (two for-loops). In the first iteration, bit 1 of byte 1, bit 2 of byte 2,
 * ..., bit 8 of byte 8 are already in their final spots, so avr[i], i={0,1},
 * can be ANDed with tcg_mask. In every following iteration, avr[i] and
 * tcg_mask have to be shifted right by 7 and 8 places, respectively, to get
 * bit 1 of byte 2, bit 2 of byte 3, ..., bit 7 of byte 8 into their final
 * spots, so the shifted avr values (saved in tmp) can be ANDed with the new
 * value of tcg_mask, and so on. After the first 8 iterations (the first
 * loop), all first bits are in their final places, all second bits except
 * the second bit of the eighth byte are in their places, ..., and only the
 * eighth bit of the eighth byte is in its place. The second loop performs
 * the same operations symmetrically (shifting left) to bring the other half
 * of the bits into their final spots. The results for the first and second
 * doubleword elements are accumulated in result[0] and result[1] and
 * finally written to the corresponding doubleword elements of the
 * destination register vD.
 */
static void trans_vgbbd(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t mask = 0x8040201008040201ULL;
    int i, j;

    TCGv_i64 result[2];
    result[0] = tcg_temp_new_i64();
    result[1] = tcg_temp_new_i64();
    TCGv_i64 avr[2];
    avr[0] = tcg_temp_new_i64();
    avr[1] = tcg_temp_new_i64();
    TCGv_i64 tcg_mask = tcg_temp_new_i64();

    tcg_gen_movi_i64(tcg_mask, mask);
    for (j = 0; j < 2; j++) {
        get_avr64(avr[j], VB, j);
        tcg_gen_and_i64(result[j], avr[j], tcg_mask);
    }
    for (i = 1; i < 8; i++) {
        tcg_gen_movi_i64(tcg_mask, mask >> (i * 8));
        for (j = 0; j < 2; j++) {
            tcg_gen_shri_i64(tmp, avr[j], i * 7);
            tcg_gen_and_i64(tmp, tmp, tcg_mask);
            tcg_gen_or_i64(result[j], result[j], tmp);
        }
    }
    for (i = 1; i < 8; i++) {
        tcg_gen_movi_i64(tcg_mask, mask << (i * 8));
        for (j = 0; j < 2; j++) {
            tcg_gen_shli_i64(tmp, avr[j], i * 7);
            tcg_gen_and_i64(tmp, tmp, tcg_mask);
            tcg_gen_or_i64(result[j], result[j], tmp);
        }
    }
    for (j = 0; j < 2; j++) {
        set_avr64(VT, result[j], j);
    }
}

/*
 * vclzw VRT,VRB - Vector Count Leading Zeros Word
 *
 * Count the number of leading zero bits of each word element in the source
 * register and place the result in the corresponding word element of the
 * destination register.
 */
static void trans_vclzw(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i32 tmp = tcg_temp_new_i32();
    int i;

    /* Perform count for every word element using tcg_gen_clzi_i32. */
    for (i = 0; i < 4; i++) {
        tcg_gen_ld_i32(tmp, cpu_env,
                       offsetof(CPUPPCState, vsr[32 + VB].u64[0]) + i * 4);
        tcg_gen_clzi_i32(tmp, tmp, 32);
        tcg_gen_st_i32(tmp, cpu_env,
                       offsetof(CPUPPCState, vsr[32 + VT].u64[0]) + i * 4);
    }
}

/*
 * vclzd VRT,VRB - Vector Count Leading Zeros Doubleword
 *
 * Count the number of leading zero bits of each doubleword element in the
 * source register and place the result in the corresponding doubleword
 * element of the destination register.
 */
static void trans_vclzd(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();

    /* high doubleword */
    get_avr64(avr, VB, true);
    tcg_gen_clzi_i64(avr, avr, 64);
    set_avr64(VT, avr, true);

    /* low doubleword */
    get_avr64(avr, VB, false);
    tcg_gen_clzi_i64(avr, avr, 64);
    set_avr64(VT, avr, false);
}

GEN_VXFORM_V(vmuluwm, MO_32, tcg_gen_gvec_mul, 4, 2);
GEN_VXFORM(vsrv, 2, 28);
GEN_VXFORM(vslv, 2, 29);
GEN_VXFORM(vslo, 6, 16);
GEN_VXFORM(vsro, 6, 17);

static bool do_vector_gvec3_VX(DisasContext *ctx, arg_VX *a, int vece,
                               void (*gen_gvec)(unsigned, uint32_t, uint32_t,
                                                uint32_t, uint32_t, uint32_t))
{
    REQUIRE_VECTOR(ctx);

    gen_gvec(vece, avr_full_offset(a->vrt), avr_full_offset(a->vra),
             avr_full_offset(a->vrb), 16, 16);

    return true;
}
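
/*
 * The gvec shlv/shrv/sarv/rotlv expanders mask the per-element shift count
 * to the element size, which matches the ISA requirement that only the low
 * log2(element size) bits of each vB element are used.
 */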

TRANS_FLAGS(ALTIVEC, VSLB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_shlv);
TRANS_FLAGS(ALTIVEC, VSLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_shlv);
TRANS_FLAGS(ALTIVEC, VSLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_shlv);
TRANS_FLAGS2(ALTIVEC_207, VSLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_shlv);

TRANS_FLAGS(ALTIVEC, VSRB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_shrv);
TRANS_FLAGS(ALTIVEC, VSRH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_shrv);
TRANS_FLAGS(ALTIVEC, VSRW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_shrv);
TRANS_FLAGS2(ALTIVEC_207, VSRD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_shrv);

TRANS_FLAGS(ALTIVEC, VSRAB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_sarv);
TRANS_FLAGS(ALTIVEC, VSRAH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_sarv);
TRANS_FLAGS(ALTIVEC, VSRAW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_sarv);
TRANS_FLAGS2(ALTIVEC_207, VSRAD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_sarv);

TRANS_FLAGS(ALTIVEC, VRLB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_rotlv)
TRANS_FLAGS(ALTIVEC, VRLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_rotlv)
TRANS_FLAGS(ALTIVEC, VRLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_rotlv)
TRANS_FLAGS2(ALTIVEC_207, VRLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_rotlv)

static TCGv_vec do_vrl_mask_vec(unsigned vece, TCGv_vec vrb)
{
    TCGv_vec t0 = tcg_temp_new_vec_matching(vrb),
             t1 = tcg_temp_new_vec_matching(vrb),
             t2 = tcg_temp_new_vec_matching(vrb),
             ones = tcg_constant_vec_matching(vrb, vece, -1);

    /* Extract b and e */
    tcg_gen_dupi_vec(vece, t2, (8 << vece) - 1);

    tcg_gen_shri_vec(vece, t0, vrb, 16);
    tcg_gen_and_vec(vece, t0, t0, t2);

    tcg_gen_shri_vec(vece, t1, vrb, 8);
    tcg_gen_and_vec(vece, t1, t1, t2);

    /* Compare b and e to negate the mask where begin > end */
    tcg_gen_cmp_vec(TCG_COND_GT, vece, t2, t0, t1);

    /* Create the mask with (~0 >> b) ^ ((~0 >> e) >> 1) */
    tcg_gen_shrv_vec(vece, t0, ones, t0);
    tcg_gen_shrv_vec(vece, t1, ones, t1);
    tcg_gen_shri_vec(vece, t1, t1, 1);
    tcg_gen_xor_vec(vece, t0, t0, t1);

    /* Negate the mask */
    tcg_gen_xor_vec(vece, t0, t0, t2);

    return t0;
}
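
/*
 * For example, with vece = MO_8, b = 2 and e = 5:
 * (~0 >> b) ^ ((~0 >> e) >> 1) = 0x3f ^ 0x03 = 0x3c, i.e. bits 2..5 set
 * (bit 0 being the MSB, as the ISA numbers them); the final XOR with the
 * comparison result inverts the mask when b > e.
 */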

static void gen_vrlnm_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra,
                          TCGv_vec vrb)
{
    TCGv_vec mask, n = tcg_temp_new_vec_matching(vrt);

    /* Create the mask */
    mask = do_vrl_mask_vec(vece, vrb);

    /* Extract n */
    tcg_gen_dupi_vec(vece, n, (8 << vece) - 1);
    tcg_gen_and_vec(vece, n, vrb, n);

    /* Rotate and mask */
    tcg_gen_rotlv_vec(vece, vrt, vra, n);
    tcg_gen_and_vec(vece, vrt, vrt, mask);
}

static bool do_vrlnm(DisasContext *ctx, arg_VX *a, int vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_rotlv_vec, INDEX_op_sari_vec,
        INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_shrv_vec, 0
    };
    static const GVecGen3 ops[2] = {
        {
            .fniv = gen_vrlnm_vec,
            .fno = gen_helper_VRLWNM,
            .opt_opc = vecop_list,
            .load_dest = true,
            .vece = MO_32
        },
        {
            .fniv = gen_vrlnm_vec,
            .fno = gen_helper_VRLDNM,
            .opt_opc = vecop_list,
            .load_dest = true,
            .vece = MO_64
        }
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &ops[vece - 2]);

    return true;
}

TRANS(VRLWNM, do_vrlnm, MO_32)
TRANS(VRLDNM, do_vrlnm, MO_64)

static void gen_vrlmi_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra,
                          TCGv_vec vrb)
{
    TCGv_vec mask, n = tcg_temp_new_vec_matching(vrt),
             tmp = tcg_temp_new_vec_matching(vrt);

    /* Create the mask */
    mask = do_vrl_mask_vec(vece, vrb);

    /* Extract n */
    tcg_gen_dupi_vec(vece, n, (8 << vece) - 1);
    tcg_gen_and_vec(vece, n, vrb, n);

    /* Rotate and insert */
    tcg_gen_rotlv_vec(vece, tmp, vra, n);
    tcg_gen_bitsel_vec(vece, vrt, mask, tmp, vrt);
}

static bool do_vrlmi(DisasContext *ctx, arg_VX *a, int vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_rotlv_vec, INDEX_op_sari_vec,
        INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_shrv_vec, 0
    };
    static const GVecGen3 ops[2] = {
        {
            .fniv = gen_vrlmi_vec,
            .fno = gen_helper_VRLWMI,
            .opt_opc = vecop_list,
            .load_dest = true,
            .vece = MO_32
        },
        {
            .fniv = gen_vrlmi_vec,
            .fno = gen_helper_VRLDMI,
            .opt_opc = vecop_list,
            .load_dest = true,
            .vece = MO_64
        }
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &ops[vece - 2]);

    return true;
}

TRANS(VRLWMI, do_vrlmi, MO_32)
TRANS(VRLDMI, do_vrlmi, MO_64)

static bool do_vector_shift_quad(DisasContext *ctx, arg_VX *a, bool right,
                                 bool alg)
{
    TCGv_i64 hi, lo, t0, t1, n, zero = tcg_constant_i64(0);

    REQUIRE_VECTOR(ctx);

    n = tcg_temp_new_i64();
    hi = tcg_temp_new_i64();
    lo = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();

    get_avr64(lo, a->vra, false);
    get_avr64(hi, a->vra, true);

    get_avr64(n, a->vrb, true);

    tcg_gen_andi_i64(t0, n, 64);
    if (right) {
        tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, hi, lo);
        if (alg) {
            t1 = tcg_temp_new_i64();
            tcg_gen_sari_i64(t1, lo, 63);
        } else {
            t1 = zero;
        }
        tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, t1, hi);
    } else {
        tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, lo, hi);
        tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, zero, lo);
    }
    tcg_gen_andi_i64(n, n, 0x3F);

    if (right) {
        if (alg) {
            tcg_gen_sar_i64(t0, hi, n);
        } else {
            tcg_gen_shr_i64(t0, hi, n);
        }
    } else {
        tcg_gen_shl_i64(t0, lo, n);
    }
    set_avr64(a->vrt, t0, right);

    if (right) {
        tcg_gen_shr_i64(lo, lo, n);
    } else {
        tcg_gen_shl_i64(hi, hi, n);
    }
    tcg_gen_xori_i64(n, n, 63);
    if (right) {
        tcg_gen_shl_i64(hi, hi, n);
        tcg_gen_shli_i64(hi, hi, 1);
    } else {
        tcg_gen_shr_i64(lo, lo, n);
        tcg_gen_shri_i64(lo, lo, 1);
    }
    tcg_gen_or_i64(hi, hi, lo);
    set_avr64(a->vrt, hi, !right);
    return true;
}
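
/*
 * In the code above, bit 6 of the shift amount selects whether the two
 * doublewords are pre-swapped (a shift by 64..127 moves the entire other
 * half across), and the residual 0..63-bit shift is then composed with a
 * shift/opposite-shift/or pair; the "xor with 63 plus an extra shift by 1"
 * form avoids an undefined 64-bit TCG shift when n = 0.
 */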

TRANS_FLAGS2(ISA310, VSLQ, do_vector_shift_quad, false, false);
TRANS_FLAGS2(ISA310, VSRQ, do_vector_shift_quad, true, false);
TRANS_FLAGS2(ISA310, VSRAQ, do_vector_shift_quad, true, true);

static void do_vrlq_mask(TCGv_i64 mh, TCGv_i64 ml, TCGv_i64 b, TCGv_i64 e)
{
    TCGv_i64 th, tl, t0, t1, zero = tcg_constant_i64(0),
             ones = tcg_constant_i64(-1);

    th = tcg_temp_new_i64();
    tl = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    /* m = ~0 >> b */
    tcg_gen_andi_i64(t0, b, 64);
    tcg_gen_movcond_i64(TCG_COND_NE, t1, t0, zero, zero, ones);
    tcg_gen_andi_i64(t0, b, 0x3F);
    tcg_gen_shr_i64(mh, t1, t0);
    tcg_gen_shr_i64(ml, ones, t0);
    tcg_gen_xori_i64(t0, t0, 63);
    tcg_gen_shl_i64(t1, t1, t0);
    tcg_gen_shli_i64(t1, t1, 1);
    tcg_gen_or_i64(ml, t1, ml);

    /* t = ~0 >> e */
    tcg_gen_andi_i64(t0, e, 64);
    tcg_gen_movcond_i64(TCG_COND_NE, t1, t0, zero, zero, ones);
    tcg_gen_andi_i64(t0, e, 0x3F);
    tcg_gen_shr_i64(th, t1, t0);
    tcg_gen_shr_i64(tl, ones, t0);
    tcg_gen_xori_i64(t0, t0, 63);
    tcg_gen_shl_i64(t1, t1, t0);
    tcg_gen_shli_i64(t1, t1, 1);
    tcg_gen_or_i64(tl, t1, tl);

    /* t = t >> 1 */
    tcg_gen_extract2_i64(tl, tl, th, 1);
    tcg_gen_shri_i64(th, th, 1);

    /* m = m ^ t */
    tcg_gen_xor_i64(mh, mh, th);
    tcg_gen_xor_i64(ml, ml, tl);

    /* Negate the mask if begin > end */
    tcg_gen_movcond_i64(TCG_COND_GT, t0, b, e, ones, zero);

    tcg_gen_xor_i64(mh, mh, t0);
    tcg_gen_xor_i64(ml, ml, t0);
}
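
/*
 * This is the 128-bit analogue of do_vrl_mask_vec() above: build ~0 >> b
 * and (~0 >> e) >> 1 across the doubleword pair, XOR them to obtain the
 * begin/end mask, and negate it when begin > end.
 */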

static bool do_vector_rotl_quad(DisasContext *ctx, arg_VX *a, bool mask,
                                bool insert)
{
    TCGv_i64 ah, al, vrb, n, t0, t1, zero = tcg_constant_i64(0);

    REQUIRE_VECTOR(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);

    ah = tcg_temp_new_i64();
    al = tcg_temp_new_i64();
    vrb = tcg_temp_new_i64();
    n = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    get_avr64(ah, a->vra, true);
    get_avr64(al, a->vra, false);
    get_avr64(vrb, a->vrb, true);

    tcg_gen_mov_i64(t0, ah);
    tcg_gen_andi_i64(t1, vrb, 64);
    tcg_gen_movcond_i64(TCG_COND_NE, ah, t1, zero, al, ah);
    tcg_gen_movcond_i64(TCG_COND_NE, al, t1, zero, t0, al);
    tcg_gen_andi_i64(n, vrb, 0x3F);

    tcg_gen_shl_i64(t0, ah, n);
    tcg_gen_shl_i64(t1, al, n);

    tcg_gen_xori_i64(n, n, 63);

    tcg_gen_shr_i64(al, al, n);
    tcg_gen_shri_i64(al, al, 1);
    tcg_gen_or_i64(t0, al, t0);

    tcg_gen_shr_i64(ah, ah, n);
    tcg_gen_shri_i64(ah, ah, 1);
    tcg_gen_or_i64(t1, ah, t1);

    if (mask || insert) {
        tcg_gen_extract_i64(n, vrb, 8, 7);
        tcg_gen_extract_i64(vrb, vrb, 16, 7);

        do_vrlq_mask(ah, al, vrb, n);

        tcg_gen_and_i64(t0, t0, ah);
        tcg_gen_and_i64(t1, t1, al);

        if (insert) {
            get_avr64(n, a->vrt, true);
            get_avr64(vrb, a->vrt, false);
            tcg_gen_andc_i64(n, n, ah);
            tcg_gen_andc_i64(vrb, vrb, al);
            tcg_gen_or_i64(t0, t0, n);
            tcg_gen_or_i64(t1, t1, vrb);
        }
    }

    set_avr64(a->vrt, t0, true);
    set_avr64(a->vrt, t1, false);
    return true;
}

TRANS(VRLQ, do_vector_rotl_quad, false, false)
TRANS(VRLQNM, do_vector_rotl_quad, true, false)
TRANS(VRLQMI, do_vector_rotl_quad, false, true)

#define GEN_VXFORM_SAT(NAME, VECE, NORM, SAT, OPC2, OPC3)               \
static void glue(glue(gen_, NAME), _vec)(unsigned vece, TCGv_vec t,     \
                                         TCGv_vec sat, TCGv_vec a,      \
                                         TCGv_vec b)                    \
{                                                                       \
    TCGv_vec x = tcg_temp_new_vec_matching(t);                          \
    glue(glue(tcg_gen_, NORM), _vec)(VECE, x, a, b);                    \
    glue(glue(tcg_gen_, SAT), _vec)(VECE, t, a, b);                     \
    tcg_gen_cmp_vec(TCG_COND_NE, VECE, x, x, t);                        \
    tcg_gen_or_vec(VECE, sat, sat, x);                                  \
}                                                                       \
static void glue(gen_, NAME)(DisasContext *ctx)                         \
{                                                                       \
    static const TCGOpcode vecop_list[] = {                             \
        glue(glue(INDEX_op_, NORM), _vec),                              \
        glue(glue(INDEX_op_, SAT), _vec),                               \
        INDEX_op_cmp_vec, 0                                             \
    };                                                                  \
    static const GVecGen4 g = {                                         \
        .fniv = glue(glue(gen_, NAME), _vec),                           \
        .fno = glue(gen_helper_, NAME),                                 \
        .opt_opc = vecop_list,                                          \
        .write_aofs = true,                                             \
        .vece = VECE,                                                   \
    };                                                                  \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    tcg_gen_gvec_4(avr_full_offset(rD(ctx->opcode)),                    \
                   offsetof(CPUPPCState, vscr_sat),                     \
                   avr_full_offset(rA(ctx->opcode)),                    \
                   avr_full_offset(rB(ctx->opcode)),                    \
                   16, 16, &g);                                         \
}
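
/*
 * NORM computes the wrapping result and SAT the saturating one; any element
 * where the two differ has saturated, so the accumulated vscr_sat bits are
 * ORed with the element-wise comparison. E.g. for vaddubs, 0xff + 0x01
 * wraps to 0x00 but saturates to 0xff, setting VSCR.SAT.
 */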

GEN_VXFORM_SAT(vaddubs, MO_8, add, usadd, 0, 8);
GEN_VXFORM_DUAL_EXT(vaddubs, PPC_ALTIVEC, PPC_NONE, 0,                  \
                    vmul10uq, PPC_NONE, PPC2_ISA300, 0x0000F800)
GEN_VXFORM_SAT(vadduhs, MO_16, add, usadd, 0, 9);
GEN_VXFORM_DUAL(vadduhs, PPC_ALTIVEC, PPC_NONE,                         \
                vmul10euq, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_SAT(vadduws, MO_32, add, usadd, 0, 10);
GEN_VXFORM_SAT(vaddsbs, MO_8, add, ssadd, 0, 12);
GEN_VXFORM_SAT(vaddshs, MO_16, add, ssadd, 0, 13);
GEN_VXFORM_SAT(vaddsws, MO_32, add, ssadd, 0, 14);
GEN_VXFORM_SAT(vsububs, MO_8, sub, ussub, 0, 24);
GEN_VXFORM_SAT(vsubuhs, MO_16, sub, ussub, 0, 25);
GEN_VXFORM_SAT(vsubuws, MO_32, sub, ussub, 0, 26);
GEN_VXFORM_SAT(vsubsbs, MO_8, sub, sssub, 0, 28);
GEN_VXFORM_SAT(vsubshs, MO_16, sub, sssub, 0, 29);
GEN_VXFORM_SAT(vsubsws, MO_32, sub, sssub, 0, 30);
GEN_VXFORM_TRANS(vsl, 2, 7);
GEN_VXFORM_TRANS(vsr, 2, 11);
GEN_VXFORM_ENV(vpkuhum, 7, 0);
GEN_VXFORM_ENV(vpkuwum, 7, 1);
GEN_VXFORM_ENV(vpkudum, 7, 17);
GEN_VXFORM_ENV(vpkuhus, 7, 2);
GEN_VXFORM_ENV(vpkuwus, 7, 3);
GEN_VXFORM_ENV(vpkudus, 7, 19);
GEN_VXFORM_ENV(vpkshus, 7, 4);
GEN_VXFORM_ENV(vpkswus, 7, 5);
GEN_VXFORM_ENV(vpksdus, 7, 21);
GEN_VXFORM_ENV(vpkshss, 7, 6);
GEN_VXFORM_ENV(vpkswss, 7, 7);
GEN_VXFORM_ENV(vpksdss, 7, 23);
GEN_VXFORM(vpkpx, 7, 12);
GEN_VXFORM_ENV(vsum4ubs, 4, 24);
GEN_VXFORM_ENV(vsum4sbs, 4, 28);
GEN_VXFORM_ENV(vsum4shs, 4, 25);
GEN_VXFORM_ENV(vsum2sws, 4, 26);
GEN_VXFORM_ENV(vsumsws, 4, 30);
GEN_VXFORM_ENV(vaddfp, 5, 0);
GEN_VXFORM_ENV(vsubfp, 5, 1);
GEN_VXFORM_ENV(vmaxfp, 5, 16);
GEN_VXFORM_ENV(vminfp, 5, 17);
GEN_VXFORM_HETRO(vextublx, 6, 24)
GEN_VXFORM_HETRO(vextuhlx, 6, 25)
GEN_VXFORM_HETRO(vextuwlx, 6, 26)
GEN_VXFORM_TRANS_DUAL(vmrgow, PPC_NONE, PPC2_ALTIVEC_207,
                      vextuwlx, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_HETRO(vextubrx, 6, 28)
GEN_VXFORM_HETRO(vextuhrx, 6, 29)
GEN_VXFORM_HETRO(vextuwrx, 6, 30)
GEN_VXFORM_TRANS(lvsl, 6, 31)
GEN_VXFORM_TRANS(lvsr, 6, 32)
GEN_VXFORM_TRANS_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207,
                      vextuwrx, PPC_NONE, PPC2_ISA300)

#define GEN_VXRFORM1(opname, name, str, opc2, opc3)                     \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr ra, rb, rd;                                                \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##opname(cpu_env, rd, ra, rb);                           \
}

#define GEN_VXRFORM(name, opc2, opc3)                                   \
    GEN_VXRFORM1(name, name, #name, opc2, opc3)                         \
    GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))

/*
 * Support for Altivec instructions that use bit 31 (Rc) as an opcode
 * bit but also use bit 21 as an actual Rc bit. In general, these pairs
 * come from different versions of the ISA, so we must also support a
 * pair of flags for each instruction.
 */
#define GEN_VXRFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)      \
static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
{                                                                       \
    if ((Rc(ctx->opcode) == 0) &&                                       \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) {  \
        if (Rc21(ctx->opcode) == 0) {                                   \
            gen_##name0(ctx);                                           \
        } else {                                                        \
            gen_##name0##_(ctx);                                        \
        }                                                               \
    } else if ((Rc(ctx->opcode) == 1) &&                                \
        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) {  \
        if (Rc21(ctx->opcode) == 0) {                                   \
            gen_##name1(ctx);                                           \
        } else {                                                        \
            gen_##name1##_(ctx);                                        \
        }                                                               \
    } else {                                                            \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);             \
    }                                                                   \
}

static void do_vcmp_rc(int vrt)
{
    TCGv_i64 tmp, set, clr;

    tmp = tcg_temp_new_i64();
    set = tcg_temp_new_i64();
    clr = tcg_temp_new_i64();

    get_avr64(tmp, vrt, true);
    tcg_gen_mov_i64(set, tmp);
    get_avr64(tmp, vrt, false);
    tcg_gen_or_i64(clr, set, tmp);
    tcg_gen_and_i64(set, set, tmp);

    tcg_gen_setcondi_i64(TCG_COND_EQ, clr, clr, 0);
    tcg_gen_shli_i64(clr, clr, 1);

    tcg_gen_setcondi_i64(TCG_COND_EQ, set, set, -1);
    tcg_gen_shli_i64(set, set, 3);

    tcg_gen_or_i64(tmp, set, clr);
    tcg_gen_extrl_i64_i32(cpu_crf[6], tmp);
}
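
/*
 * CR6 thus ends up as 0b1000 when every element compared true (the AND of
 * both doublewords is all-ones), 0b0010 when none did (the OR of both
 * doublewords is zero), and 0b0000 for mixed results.
 */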

static bool do_vcmp(DisasContext *ctx, arg_VC *a, TCGCond cond, int vece)
{
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_cmp(cond, vece, avr_full_offset(a->vrt),
                     avr_full_offset(a->vra), avr_full_offset(a->vrb),
                     16, 16);

    if (a->rc) {
        do_vcmp_rc(a->vrt);
    }

    return true;
}

TRANS_FLAGS(ALTIVEC, VCMPEQUB, do_vcmp, TCG_COND_EQ, MO_8)
TRANS_FLAGS(ALTIVEC, VCMPEQUH, do_vcmp, TCG_COND_EQ, MO_16)
TRANS_FLAGS(ALTIVEC, VCMPEQUW, do_vcmp, TCG_COND_EQ, MO_32)
TRANS_FLAGS2(ALTIVEC_207, VCMPEQUD, do_vcmp, TCG_COND_EQ, MO_64)

TRANS_FLAGS(ALTIVEC, VCMPGTSB, do_vcmp, TCG_COND_GT, MO_8)
TRANS_FLAGS(ALTIVEC, VCMPGTSH, do_vcmp, TCG_COND_GT, MO_16)
TRANS_FLAGS(ALTIVEC, VCMPGTSW, do_vcmp, TCG_COND_GT, MO_32)
TRANS_FLAGS2(ALTIVEC_207, VCMPGTSD, do_vcmp, TCG_COND_GT, MO_64)
TRANS_FLAGS(ALTIVEC, VCMPGTUB, do_vcmp, TCG_COND_GTU, MO_8)
TRANS_FLAGS(ALTIVEC, VCMPGTUH, do_vcmp, TCG_COND_GTU, MO_16)
TRANS_FLAGS(ALTIVEC, VCMPGTUW, do_vcmp, TCG_COND_GTU, MO_32)
TRANS_FLAGS2(ALTIVEC_207, VCMPGTUD, do_vcmp, TCG_COND_GTU, MO_64)

TRANS_FLAGS2(ISA300, VCMPNEB, do_vcmp, TCG_COND_NE, MO_8)
TRANS_FLAGS2(ISA300, VCMPNEH, do_vcmp, TCG_COND_NE, MO_16)
TRANS_FLAGS2(ISA300, VCMPNEW, do_vcmp, TCG_COND_NE, MO_32)

static void gen_vcmpnez_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t0, t1, zero;

    t0 = tcg_temp_new_vec_matching(t);
    t1 = tcg_temp_new_vec_matching(t);
    zero = tcg_constant_vec_matching(t, vece, 0);

    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t0, a, zero);
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t1, b, zero);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, t, a, b);

    tcg_gen_or_vec(vece, t, t, t0);
    tcg_gen_or_vec(vece, t, t, t1);
}

static bool do_vcmpnez(DisasContext *ctx, arg_VC *a, int vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, 0
    };
    static const GVecGen3 ops[3] = {
        {
            .fniv = gen_vcmpnez_vec,
            .fno = gen_helper_VCMPNEZB,
            .opt_opc = vecop_list,
            .vece = MO_8
        },
        {
            .fniv = gen_vcmpnez_vec,
            .fno = gen_helper_VCMPNEZH,
            .opt_opc = vecop_list,
            .vece = MO_16
        },
        {
            .fniv = gen_vcmpnez_vec,
            .fno = gen_helper_VCMPNEZW,
            .opt_opc = vecop_list,
            .vece = MO_32
        }
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &ops[vece]);

    if (a->rc) {
        do_vcmp_rc(a->vrt);
    }

    return true;
}

TRANS(VCMPNEZB, do_vcmpnez, MO_8)
TRANS(VCMPNEZH, do_vcmpnez, MO_16)
TRANS(VCMPNEZW, do_vcmpnez, MO_32)

static bool trans_VCMPEQUQ(DisasContext *ctx, arg_VC *a)
{
    TCGv_i64 t0, t1, t2;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();

    get_avr64(t0, a->vra, true);
    get_avr64(t1, a->vrb, true);
    tcg_gen_xor_i64(t2, t0, t1);

    get_avr64(t0, a->vra, false);
    get_avr64(t1, a->vrb, false);
    tcg_gen_xor_i64(t1, t0, t1);

    tcg_gen_or_i64(t1, t1, t2);
    tcg_gen_setcondi_i64(TCG_COND_EQ, t1, t1, 0);
    tcg_gen_neg_i64(t1, t1);

    set_avr64(a->vrt, t1, true);
    set_avr64(a->vrt, t1, false);

    if (a->rc) {
        tcg_gen_extrl_i64_i32(cpu_crf[6], t1);
        tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa);
        tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2);
    }
    return true;
}
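
/*
 * Here t1 is -1 when the operands are equal and 0 otherwise, so
 * (t1 & 0xa) ^ 0x2 yields CR6 = 0b1000 in the equal case and 0b0010
 * otherwise, matching the record form's definition.
 */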

static bool do_vcmpgtq(DisasContext *ctx, arg_VC *a, bool sign)
{
    TCGv_i64 t0, t1, t2;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();

    get_avr64(t0, a->vra, false);
    get_avr64(t1, a->vrb, false);
    tcg_gen_setcond_i64(TCG_COND_GTU, t2, t0, t1);

    get_avr64(t0, a->vra, true);
    get_avr64(t1, a->vrb, true);
    tcg_gen_movcond_i64(TCG_COND_EQ, t2, t0, t1, t2, tcg_constant_i64(0));
    tcg_gen_setcond_i64(sign ? TCG_COND_GT : TCG_COND_GTU, t1, t0, t1);

    tcg_gen_or_i64(t1, t1, t2);
    tcg_gen_neg_i64(t1, t1);

    set_avr64(a->vrt, t1, true);
    set_avr64(a->vrt, t1, false);

    if (a->rc) {
        tcg_gen_extrl_i64_i32(cpu_crf[6], t1);
        tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa);
        tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2);
    }
    return true;
}

TRANS(VCMPGTSQ, do_vcmpgtq, true)
TRANS(VCMPGTUQ, do_vcmpgtq, false)

static bool do_vcmpq(DisasContext *ctx, arg_VX_bf *a, bool sign)
{
    TCGv_i64 vra, vrb;
    TCGLabel *gt, *lt, *done;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    vra = tcg_temp_new_i64();
    vrb = tcg_temp_new_i64();
    gt = gen_new_label();
    lt = gen_new_label();
    done = gen_new_label();

    get_avr64(vra, a->vra, true);
    get_avr64(vrb, a->vrb, true);
    tcg_gen_brcond_i64((sign ? TCG_COND_GT : TCG_COND_GTU), vra, vrb, gt);
    tcg_gen_brcond_i64((sign ? TCG_COND_LT : TCG_COND_LTU), vra, vrb, lt);

    get_avr64(vra, a->vra, false);
    get_avr64(vrb, a->vrb, false);
    tcg_gen_brcond_i64(TCG_COND_GTU, vra, vrb, gt);
    tcg_gen_brcond_i64(TCG_COND_LTU, vra, vrb, lt);

    tcg_gen_movi_i32(cpu_crf[a->bf], CRF_EQ);
    tcg_gen_br(done);

    gen_set_label(gt);
    tcg_gen_movi_i32(cpu_crf[a->bf], CRF_GT);
    tcg_gen_br(done);

    gen_set_label(lt);
    tcg_gen_movi_i32(cpu_crf[a->bf], CRF_LT);
    tcg_gen_br(done);

    gen_set_label(done);
    return true;
}

TRANS(VCMPSQ, do_vcmpq, true)
TRANS(VCMPUQ, do_vcmpq, false)

GEN_VXRFORM(vcmpeqfp, 3, 3)
GEN_VXRFORM(vcmpgefp, 3, 7)
GEN_VXRFORM(vcmpgtfp, 3, 11)
GEN_VXRFORM(vcmpbfp, 3, 15)

static void gen_vsplti(DisasContext *ctx, int vece)
{
    int simm;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    simm = SIMM5(ctx->opcode);
    tcg_gen_gvec_dup_imm(vece, avr_full_offset(rD(ctx->opcode)), 16, 16, simm);
}

#define GEN_VXFORM_VSPLTI(name, vece, opc2, opc3)                       \
static void glue(gen_, name)(DisasContext *ctx) { gen_vsplti(ctx, vece); }

GEN_VXFORM_VSPLTI(vspltisb, MO_8, 6, 12);
GEN_VXFORM_VSPLTI(vspltish, MO_16, 6, 13);
GEN_VXFORM_VSPLTI(vspltisw, MO_32, 6, 14);

#define GEN_VXFORM_NOA(name, opc2, opc3)                                \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr rb, rd;                                                    \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(rd, rb);                                          \
}

#define GEN_VXFORM_NOA_ENV(name, opc2, opc3)                            \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr rb, rd;                                                    \
                                                                        \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(cpu_env, rd, rb);                                 \
}

#define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4)                        \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr rb, rd;                                                    \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(rd, rb);                                          \
}

#define GEN_VXFORM_NOA_3(name, opc2, opc3, opc4)                        \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr rb;                                                        \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    gen_helper_##name(cpu_gpr[rD(ctx->opcode)], rb);                    \
}

GEN_VXFORM_NOA(vupkhsb, 7, 8);
GEN_VXFORM_NOA(vupkhsh, 7, 9);
GEN_VXFORM_NOA(vupkhsw, 7, 25);
GEN_VXFORM_NOA(vupklsb, 7, 10);
GEN_VXFORM_NOA(vupklsh, 7, 11);
GEN_VXFORM_NOA(vupklsw, 7, 27);
GEN_VXFORM_NOA(vupkhpx, 7, 13);
GEN_VXFORM_NOA(vupklpx, 7, 15);
GEN_VXFORM_NOA_ENV(vrefp, 5, 4);
GEN_VXFORM_NOA_ENV(vrsqrtefp, 5, 5);
GEN_VXFORM_NOA_ENV(vexptefp, 5, 6);
GEN_VXFORM_NOA_ENV(vlogefp, 5, 7);
GEN_VXFORM_NOA_ENV(vrfim, 5, 11);
GEN_VXFORM_NOA_ENV(vrfin, 5, 8);
GEN_VXFORM_NOA_ENV(vrfip, 5, 10);
GEN_VXFORM_NOA_ENV(vrfiz, 5, 9);

static void gen_vprtyb_vec(unsigned vece, TCGv_vec t, TCGv_vec b)
{
    int i;
    TCGv_vec tmp = tcg_temp_new_vec_matching(b);
    /* MO_32 is 2, so 2 iterations for MO_32 and 3 for MO_64 */
    for (i = 0; i < vece; i++) {
        tcg_gen_shri_vec(vece, tmp, b, (4 << (vece - i)));
        tcg_gen_xor_vec(vece, b, tmp, b);
    }
    tcg_gen_and_vec(vece, t, b, tcg_constant_vec_matching(t, vece, 1));
}

/* vprtybw */
static void gen_vprtyb_i32(TCGv_i32 t, TCGv_i32 b)
{
    /* Keep only the low-order bit of each byte, to match gen_vprtyb_vec. */
    tcg_gen_andi_i32(t, b, 0x01010101);
    tcg_gen_ctpop_i32(t, t);
    tcg_gen_andi_i32(t, t, 1);
}

/* vprtybd */
static void gen_vprtyb_i64(TCGv_i64 t, TCGv_i64 b)
{
    /* Keep only the low-order bit of each byte, to match gen_vprtyb_vec. */
    tcg_gen_andi_i64(t, b, 0x0101010101010101ULL);
    tcg_gen_ctpop_i64(t, t);
    tcg_gen_andi_i64(t, t, 1);
}
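
/*
 * For example, the word 0x01000100 has byte low-order bits 1,0,1,0, so
 * vprtybw sets that word element to 0.
 */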

static bool do_vx_vprtyb(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_shri_vec, 0
    };

    static const GVecGen2 op[] = {
        {
            .fniv = gen_vprtyb_vec,
            .fni4 = gen_vprtyb_i32,
            .opt_opc = vecop_list,
            .vece = MO_32
        },
        {
            .fniv = gen_vprtyb_vec,
            .fni8 = gen_vprtyb_i64,
            .opt_opc = vecop_list,
            .vece = MO_64
        },
        {
            .fno = gen_helper_VPRTYBQ,
            .vece = MO_128
        },
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_2(avr_full_offset(a->vrt), avr_full_offset(a->vrb),
                   16, 16, &op[vece - MO_32]);

    return true;
}

TRANS(VPRTYBW, do_vx_vprtyb, MO_32)
TRANS(VPRTYBD, do_vx_vprtyb, MO_64)
TRANS(VPRTYBQ, do_vx_vprtyb, MO_128)

static void gen_vsplt(DisasContext *ctx, int vece)
{
    int uimm, dofs, bofs;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    uimm = UIMM5(ctx->opcode);
    bofs = avr_full_offset(rB(ctx->opcode));
    dofs = avr_full_offset(rD(ctx->opcode));

    /* Experimental testing shows that hardware masks the immediate. */
    bofs += (uimm << vece) & 15;
#if !HOST_BIG_ENDIAN
    bofs ^= 15;
    bofs &= ~((1 << vece) - 1);
#endif

    tcg_gen_gvec_dup_mem(vece, dofs, bofs, 16, 16);
}
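
/*
 * On little-endian hosts the 16-byte register image is byte-reversed, so
 * the source element offset is mirrored (bofs ^= 15) and then aligned down
 * to the element size before the dup.
 */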

#define GEN_VXFORM_VSPLT(name, vece, opc2, opc3)                        \
static void glue(gen_, name)(DisasContext *ctx) { gen_vsplt(ctx, vece); }

#define GEN_VXFORM_UIMM_ENV(name, opc2, opc3)                           \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr rb, rd;                                                    \
    TCGv_i32 uimm;                                                      \
                                                                        \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    uimm = tcg_const_i32(UIMM5(ctx->opcode));                           \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(cpu_env, rd, rb, uimm);                           \
}

#define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max)              \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr rb, rd;                                                    \
    uint8_t uimm = UIMM4(ctx->opcode);                                  \
    TCGv_i32 t0;                                                        \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    if (uimm > splat_max) {                                             \
        uimm = 0;                                                       \
    }                                                                   \
    t0 = tcg_temp_new_i32();                                            \
    tcg_gen_movi_i32(t0, uimm);                                         \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(rd, rb, t0);                                      \
}

GEN_VXFORM_VSPLT(vspltb, MO_8, 6, 8);
GEN_VXFORM_VSPLT(vsplth, MO_16, 6, 9);
GEN_VXFORM_VSPLT(vspltw, MO_32, 6, 10);
GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15);
GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14);
GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12);
GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8);
GEN_VXFORM_UIMM_ENV(vcfux, 5, 12);
GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13);
GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14);
GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15);
GEN_VXFORM_DUAL(vspltb, PPC_ALTIVEC, PPC_NONE,
                vextractub, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE,
                vextractuh, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE,
                vextractuw, PPC_NONE, PPC2_ISA300);

static bool trans_VGNB(DisasContext *ctx, arg_VX_n *a)
{
    /*
     * Similar to do_vextractm, we'll use a sequence of mask-shift-or
     * operations to gather the bits. The masks can be created with
     *
     * uint64_t mask(uint64_t n, uint64_t step)
     * {
     *     uint64_t p = ((1UL << (1UL << step)) - 1UL) << ((n - 1UL) << step),
     *              plen = n << step, m = 0;
     *     for (int i = 0; i < 64 / plen; i++) {
     *         m |= p;
     *         m = ror64(m, plen);
     *     }
     *     p >>= plen * DIV_ROUND_UP(64, plen) - 64;
     *     return m | p;
     * }
     *
     * But since there are few values of N, we'll use a lookup table to avoid
     * these calculations at runtime.
     */
    static const uint64_t mask[6][5] = {
        {
            0xAAAAAAAAAAAAAAAAULL, 0xccccccccccccccccULL, 0xf0f0f0f0f0f0f0f0ULL,
            0xff00ff00ff00ff00ULL, 0xffff0000ffff0000ULL
        },
        {
            0x9249249249249249ULL, 0xC30C30C30C30C30CULL, 0xF00F00F00F00F00FULL,
            0xFF0000FF0000FF00ULL, 0xFFFF00000000FFFFULL
        },
        {
            /* For N >= 4, some mask operations can be elided */
            0x8888888888888888ULL, 0, 0xf000f000f000f000ULL, 0,
            0xFFFF000000000000ULL
        },
        {
            0x8421084210842108ULL, 0, 0xF0000F0000F0000FULL, 0, 0
        },
        {
            0x8208208208208208ULL, 0, 0xF00000F00000F000ULL, 0, 0
        },
        {
            0x8102040810204081ULL, 0, 0xF000000F000000F0ULL, 0, 0
        }
    };
    uint64_t m;
    int i, sh, nbits = DIV_ROUND_UP(64, a->n);
    TCGv_i64 hi, lo, t0, t1;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    if (a->n < 2) {
        /*
         * "N can be any value between 2 and 7, inclusive." Otherwise, the
         * result is undefined, so we don't need to change RT. Also, N > 7 is
         * impossible since the immediate field is 3 bits only.
         */
        return true;
    }

    hi = tcg_temp_new_i64();
    lo = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    get_avr64(hi, a->vrb, true);
    get_avr64(lo, a->vrb, false);

    /* Align the lower doubleword so we can use the same mask */
    tcg_gen_shli_i64(lo, lo, a->n * nbits - 64);

    /*
     * Starting from the most significant bit, gather every Nth bit with a
     * sequence of mask-shift-or operations. E.g.: for N=3
     * AxxBxxCxxDxxExxFxxGxxHxxIxxJxxKxxLxxMxxNxxOxxPxxQxxRxxSxxTxxUxxV
     *     & rep(0b100)
     * A..B..C..D..E..F..G..H..I..J..K..L..M..N..O..P..Q..R..S..T..U..V
     *     << 2
     * .B..C..D..E..F..G..H..I..J..K..L..M..N..O..P..Q..R..S..T..U..V..
     *     |
     * AB.BC.CD.DE.EF.FG.GH.HI.IJ.JK.KL.LM.MN.NO.OP.PQ.QR.RS.ST.TU.UV.V
     *     & rep(0b110000)
     * AB....CD....EF....GH....IJ....KL....MN....OP....QR....ST....UV..
     *     << 4
     * ..CD....EF....GH....IJ....KL....MN....OP....QR....ST....UV......
     *     |
     * ABCD..CDEF..EFGH..GHIJ..IJKL..KLMN..MNOP..OPQR..QRST..STUV..UV..
     *     & rep(0b111100000000)
     * ABCD........EFGH........IJKL........MNOP........QRST........UV..
     *     << 8
     * ....EFGH........IJKL........MNOP........QRST........UV..........
     *     |
     * ABCDEFGH....EFGHIJKL....IJKLMNOP....MNOPQRST....QRSTUV......UV..
     *     & rep(0b111111110000000000000000)
     * ABCDEFGH................IJKLMNOP................QRSTUV..........
     *     << 16
     * ........IJKLMNOP................QRSTUV..........................
     *     |
     * ABCDEFGHIJKLMNOP........IJKLMNOPQRSTUV..........QRSTUV..........
     *     & rep(0b111111111111111100000000000000000000000000000000)
     * ABCDEFGHIJKLMNOP................................QRSTUV..........
     *     << 32
     * ................QRSTUV..........................................
     *     |
     * ABCDEFGHIJKLMNOPQRSTUV..........................QRSTUV..........
     */
    for (i = 0, sh = a->n - 1; i < 5; i++, sh <<= 1) {
        m = mask[a->n - 2][i];
        if (m) {
            tcg_gen_andi_i64(hi, hi, m);
            tcg_gen_andi_i64(lo, lo, m);
        }
        if (sh < 64) {
            tcg_gen_shli_i64(t0, hi, sh);
            tcg_gen_shli_i64(t1, lo, sh);
            tcg_gen_or_i64(hi, t0, hi);
            tcg_gen_or_i64(lo, t1, lo);
        }
    }

    tcg_gen_andi_i64(hi, hi, ~(~0ULL >> nbits));
    tcg_gen_andi_i64(lo, lo, ~(~0ULL >> nbits));
    tcg_gen_shri_i64(lo, lo, nbits);
    tcg_gen_or_i64(hi, hi, lo);
    tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], hi);
    return true;
}

static bool do_vextdx(DisasContext *ctx, arg_VA *a, int size, bool right,
                      void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr,
                                         TCGv_ptr, TCGv))
{
    TCGv_ptr vrt, vra, vrb;
    TCGv rc;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    vrt = gen_avr_ptr(a->vrt);
    vra = gen_avr_ptr(a->vra);
    vrb = gen_avr_ptr(a->vrb);
    rc = tcg_temp_new();

    tcg_gen_andi_tl(rc, cpu_gpr[a->rc], 0x1F);
    if (right) {
        tcg_gen_subfi_tl(rc, 32 - size, rc);
    }
    gen_helper(cpu_env, vrt, vra, vrb, rc);
    return true;
}
1821
1822 TRANS(VEXTDUBVLX, do_vextdx, 1, false, gen_helper_VEXTDUBVLX)
1823 TRANS(VEXTDUHVLX, do_vextdx, 2, false, gen_helper_VEXTDUHVLX)
1824 TRANS(VEXTDUWVLX, do_vextdx, 4, false, gen_helper_VEXTDUWVLX)
1825 TRANS(VEXTDDVLX, do_vextdx, 8, false, gen_helper_VEXTDDVLX)
1826
1827 TRANS(VEXTDUBVRX, do_vextdx, 1, true, gen_helper_VEXTDUBVLX)
1828 TRANS(VEXTDUHVRX, do_vextdx, 2, true, gen_helper_VEXTDUHVLX)
1829 TRANS(VEXTDUWVRX, do_vextdx, 4, true, gen_helper_VEXTDUWVLX)
1830 TRANS(VEXTDDVRX, do_vextdx, 8, true, gen_helper_VEXTDDVLX)
1831
1832 static bool do_vinsx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
1833 TCGv_i64 rb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1834 {
1835 TCGv_ptr t;
1836 TCGv idx;
1837
1838 t = gen_avr_ptr(vrt);
1839 idx = tcg_temp_new();
1840
1841 tcg_gen_andi_tl(idx, ra, 0xF);
1842 if (right) {
1843 tcg_gen_subfi_tl(idx, 16 - size, idx);
1844 }
1845
1846 gen_helper(cpu_env, t, rb, idx);
1847 return true;
1848 }
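
/*
 * A small sketch of the index adjustment above (illustrative, not part of
 * QEMU): insertions happen at a left-to-right byte offset, so the
 * right-indexed variants convert their right-to-left index first.
 */
#if 0
static int vins_offset(int size, int right, int idx)
{
    idx &= 0xF;
    /* e.g. vinsbrx with idx = 0 writes the rightmost byte, offset 15 */
    return right ? (16 - size) - idx : idx;
}
#endif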
1849
1850 static bool do_vinsvx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
1851 int vrb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1852 {
1853 TCGv_i64 val;
1854
1855 val = tcg_temp_new_i64();
1856 get_avr64(val, vrb, true);
1857 return do_vinsx(ctx, vrt, size, right, ra, val, gen_helper);
1858 }
1859
1860 static bool do_vinsx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
1861 void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1862 {
1863 TCGv_i64 val;
1864
1865 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1866 REQUIRE_VECTOR(ctx);
1867
1868 val = tcg_temp_new_i64();
1869 tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);
1870
1871 return do_vinsx(ctx, a->vrt, size, right, cpu_gpr[a->vra], val, gen_helper);
1872 }
1873
1874 static bool do_vinsvx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
1875 void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1876 {
1877 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1878 REQUIRE_VECTOR(ctx);
1879
1880 return do_vinsvx(ctx, a->vrt, size, right, cpu_gpr[a->vra], a->vrb,
1881 gen_helper);
1882 }
1883
1884 static bool do_vins_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
1885 void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1886 {
1887 TCGv_i64 val;
1888
1889 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1890 REQUIRE_VECTOR(ctx);
1891
1892 if (a->uim > (16 - size)) {
1893 /*
1894 * PowerISA v3.1 says that the resulting value is undefined in this
1895 * case, so just log a guest error and leave VRT unchanged. The
1896 * real hardware would do a partial insert, e.g. if VRT is zeroed and
1897 * RB is 0x12345678, executing "vinsw VRT,RB,14" results in
1898 * VRT = 0x0000...00001234, but we don't bother to reproduce this
1899 * behavior as software shouldn't rely on it.
1900 */
1901 qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINS* at"
1902 " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
1903 16 - size);
1904 return true;
1905 }
1906
1907 val = tcg_temp_new_i64();
1908 tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);
1909
1910 return do_vinsx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), val,
1911 gen_helper);
1912 }
1913
1914 static bool do_vinsert_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
1915 void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1916 {
1917 REQUIRE_INSNS_FLAGS2(ctx, ISA300);
1918 REQUIRE_VECTOR(ctx);
1919
1920 if (a->uim > (16 - size)) {
1921 qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINSERT* at"
1922 " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
1923 16 - size);
1924 return true;
1925 }
1926
1927 return do_vinsvx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), a->vrb,
1928 gen_helper);
1929 }
1930
1931 TRANS(VINSBLX, do_vinsx_VX, 1, false, gen_helper_VINSBLX)
1932 TRANS(VINSHLX, do_vinsx_VX, 2, false, gen_helper_VINSHLX)
1933 TRANS(VINSWLX, do_vinsx_VX, 4, false, gen_helper_VINSWLX)
1934 TRANS(VINSDLX, do_vinsx_VX, 8, false, gen_helper_VINSDLX)
1935
1936 TRANS(VINSBRX, do_vinsx_VX, 1, true, gen_helper_VINSBLX)
1937 TRANS(VINSHRX, do_vinsx_VX, 2, true, gen_helper_VINSHLX)
1938 TRANS(VINSWRX, do_vinsx_VX, 4, true, gen_helper_VINSWLX)
1939 TRANS(VINSDRX, do_vinsx_VX, 8, true, gen_helper_VINSDLX)
1940
1941 TRANS(VINSW, do_vins_VX_uim4, 4, gen_helper_VINSWLX)
1942 TRANS(VINSD, do_vins_VX_uim4, 8, gen_helper_VINSDLX)
1943
1944 TRANS(VINSBVLX, do_vinsvx_VX, 1, false, gen_helper_VINSBLX)
1945 TRANS(VINSHVLX, do_vinsvx_VX, 2, false, gen_helper_VINSHLX)
1946 TRANS(VINSWVLX, do_vinsvx_VX, 4, false, gen_helper_VINSWLX)
1947
1948 TRANS(VINSBVRX, do_vinsvx_VX, 1, true, gen_helper_VINSBLX)
1949 TRANS(VINSHVRX, do_vinsvx_VX, 2, true, gen_helper_VINSHLX)
1950 TRANS(VINSWVRX, do_vinsvx_VX, 4, true, gen_helper_VINSWLX)
1951
1952 TRANS(VINSERTB, do_vinsert_VX_uim4, 1, gen_helper_VINSBLX)
1953 TRANS(VINSERTH, do_vinsert_VX_uim4, 2, gen_helper_VINSHLX)
1954 TRANS(VINSERTW, do_vinsert_VX_uim4, 4, gen_helper_VINSWLX)
1955 TRANS(VINSERTD, do_vinsert_VX_uim4, 8, gen_helper_VINSDLX)
1956
1957 static void gen_vsldoi(DisasContext *ctx)
1958 {
1959 TCGv_ptr ra, rb, rd;
1960 TCGv_i32 sh;
1961 if (unlikely(!ctx->altivec_enabled)) {
1962 gen_exception(ctx, POWERPC_EXCP_VPU);
1963 return;
1964 }
1965 ra = gen_avr_ptr(rA(ctx->opcode));
1966 rb = gen_avr_ptr(rB(ctx->opcode));
1967 rd = gen_avr_ptr(rD(ctx->opcode));
1968 sh = tcg_const_i32(VSH(ctx->opcode));
1969 gen_helper_vsldoi(rd, ra, rb, sh);
1970 }
1971
1972 static bool trans_VSLDBI(DisasContext *ctx, arg_VN *a)
1973 {
1974 TCGv_i64 t0, t1, t2;
1975
1976 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1977 REQUIRE_VECTOR(ctx);
1978
1979 t0 = tcg_temp_new_i64();
1980 t1 = tcg_temp_new_i64();
1981
1982 get_avr64(t0, a->vra, true);
1983 get_avr64(t1, a->vra, false);
1984
1985 if (a->sh != 0) {
1986 t2 = tcg_temp_new_i64();
1987
1988 get_avr64(t2, a->vrb, true);
1989
1990 tcg_gen_extract2_i64(t0, t1, t0, 64 - a->sh);
1991 tcg_gen_extract2_i64(t1, t2, t1, 64 - a->sh);
1992 }
1993
1994 set_avr64(a->vrt, t0, true);
1995 set_avr64(a->vrt, t1, false);
1996 return true;
1997 }
1998
1999 static bool trans_VSRDBI(DisasContext *ctx, arg_VN *a)
2000 {
2001 TCGv_i64 t2, t1, t0;
2002
2003 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2004 REQUIRE_VECTOR(ctx);
2005
2006 t0 = tcg_temp_new_i64();
2007 t1 = tcg_temp_new_i64();
2008
2009 get_avr64(t0, a->vrb, false);
2010 get_avr64(t1, a->vrb, true);
2011
2012 if (a->sh != 0) {
2013 t2 = tcg_temp_new_i64();
2014
2015 get_avr64(t2, a->vra, false);
2016
2017 tcg_gen_extract2_i64(t0, t0, t1, a->sh);
2018 tcg_gen_extract2_i64(t1, t1, t2, a->sh);
2019 }
2020
2021 set_avr64(a->vrt, t0, false);
2022 set_avr64(a->vrt, t1, true);
2023 return true;
2024 }
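
/*
 * tcg_gen_extract2_i64(d, al, ah, ofs) takes the 64 bits starting at bit
 * ofs of the 128-bit concatenation ah:al. A host-side sketch of how the
 * two extract2 ops implement VSLDBI, assuming a compiler with unsigned
 * __int128; the names here are illustrative only.
 */
#if 0
#include <stdint.h>

static uint64_t extract2(uint64_t al, uint64_t ah, unsigned ofs)
{
    return (uint64_t)((((unsigned __int128)ah << 64) | al) >> ofs);
}

/* index 1 is the most significant doubleword, as with get_avr64() */
static void vsldbi_model(uint64_t vrt[2], const uint64_t vra[2],
                         const uint64_t vrb[2], unsigned sh /* 1..7 */)
{
    vrt[1] = extract2(vra[0], vra[1], 64 - sh);  /* vra shifted left */
    vrt[0] = extract2(vrb[1], vra[0], 64 - sh);  /* vrb's top bits shifted in */
}
#endif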
2025
2026 static bool do_vexpand(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
2027 {
2028 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2029 REQUIRE_VECTOR(ctx);
2030
2031 tcg_gen_gvec_sari(vece, avr_full_offset(a->vrt), avr_full_offset(a->vrb),
2032 (8 << vece) - 1, 16, 16);
2033
2034 return true;
2035 }
2036
2037 TRANS(VEXPANDBM, do_vexpand, MO_8)
2038 TRANS(VEXPANDHM, do_vexpand, MO_16)
2039 TRANS(VEXPANDWM, do_vexpand, MO_32)
2040 TRANS(VEXPANDDM, do_vexpand, MO_64)
2041
2042 static bool trans_VEXPANDQM(DisasContext *ctx, arg_VX_tb *a)
2043 {
2044 TCGv_i64 tmp;
2045
2046 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2047 REQUIRE_VECTOR(ctx);
2048
2049 tmp = tcg_temp_new_i64();
2050
2051 get_avr64(tmp, a->vrb, true);
2052 tcg_gen_sari_i64(tmp, tmp, 63);
2053 set_avr64(a->vrt, tmp, false);
2054 set_avr64(a->vrt, tmp, true);
2055 return true;
2056 }
2057
2058 static bool do_vextractm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
2059 {
2060 const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece,
2061 mask = dup_const(vece, 1ULL << (elem_width - 1));
2062 uint64_t i, j;
2063 TCGv_i64 lo, hi, t0, t1;
2064
2065 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2066 REQUIRE_VECTOR(ctx);
2067
2068 hi = tcg_temp_new_i64();
2069 lo = tcg_temp_new_i64();
2070 t0 = tcg_temp_new_i64();
2071 t1 = tcg_temp_new_i64();
2072
2073 get_avr64(lo, a->vrb, false);
2074 get_avr64(hi, a->vrb, true);
2075
2076 tcg_gen_andi_i64(lo, lo, mask);
2077 tcg_gen_andi_i64(hi, hi, mask);
2078
2079 /*
2080 * Gather the most significant bit of each element into the most
2081 * significant bits of the doubleword. E.g. for bytes:
2082 * aXXXXXXXbXXXXXXXcXXXXXXXdXXXXXXXeXXXXXXXfXXXXXXXgXXXXXXXhXXXXXXX
2083 * & dup(1 << (elem_width - 1))
2084 * a0000000b0000000c0000000d0000000e0000000f0000000g0000000h0000000
2085 * << 32 - 4
2086 * 0000e0000000f0000000g0000000h00000000000000000000000000000000000
2087 * |
2088 * a000e000b000f000c000g000d000h000e0000000f0000000g0000000h0000000
2089 * << 16 - 2
2090 * 00c000g000d000h000e0000000f0000000g0000000h000000000000000000000
2091 * |
2092 * a0c0e0g0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h0000000
2093 * << 8 - 1
2094 * 0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h00000000000000
2095 * |
2096 * abcdefghbcdefgh0cdefgh00defgh000efgh0000fgh00000gh000000h0000000
2097 */
2098 for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
2099 tcg_gen_shli_i64(t0, hi, j - i);
2100 tcg_gen_shli_i64(t1, lo, j - i);
2101 tcg_gen_or_i64(hi, hi, t0);
2102 tcg_gen_or_i64(lo, lo, t1);
2103 }
2104
2105 tcg_gen_shri_i64(hi, hi, 64 - elem_count_half);
2106 tcg_gen_extract2_i64(lo, lo, hi, 64 - elem_count_half);
2107 tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], lo);
2108 return true;
2109 }
2110
2111 TRANS(VEXTRACTBM, do_vextractm, MO_8)
2112 TRANS(VEXTRACTHM, do_vextractm, MO_16)
2113 TRANS(VEXTRACTWM, do_vextractm, MO_32)
2114 TRANS(VEXTRACTDM, do_vextractm, MO_64)
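
/*
 * A straightforward host-side model of the byte case above (VEXTRACTBM),
 * illustrative only: element 0 is the most significant byte of the high
 * doubleword (index 1, as with get_avr64()) and lands in mask bit 15.
 */
#if 0
#include <stdint.h>

static uint16_t vextractbm_model(const uint64_t vrb[2])
{
    uint16_t r = 0;

    for (int i = 0; i < 8; i++) {
        r |= ((vrb[1] >> (63 - 8 * i)) & 1) << (15 - i);  /* elements 0..7 */
        r |= ((vrb[0] >> (63 - 8 * i)) & 1) << (7 - i);   /* elements 8..15 */
    }
    return r;
}
#endif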
2115
2116 static bool trans_VEXTRACTQM(DisasContext *ctx, arg_VX_tb *a)
2117 {
2118 TCGv_i64 tmp;
2119
2120 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2121 REQUIRE_VECTOR(ctx);
2122
2123 tmp = tcg_temp_new_i64();
2124
2125 get_avr64(tmp, a->vrb, true);
2126 tcg_gen_shri_i64(tmp, tmp, 63);
2127 tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], tmp);
2128 return true;
2129 }
2130
2131 static bool do_mtvsrm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
2132 {
2133 const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece;
2134 uint64_t c;
2135 int i, j;
2136 TCGv_i64 hi, lo, t0, t1;
2137
2138 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2139 REQUIRE_VECTOR(ctx);
2140
2141 hi = tcg_temp_new_i64();
2142 lo = tcg_temp_new_i64();
2143 t0 = tcg_temp_new_i64();
2144 t1 = tcg_temp_new_i64();
2145
2146 tcg_gen_extu_tl_i64(t0, cpu_gpr[a->vrb]);
2147 tcg_gen_extract_i64(hi, t0, elem_count_half, elem_count_half);
2148 tcg_gen_extract_i64(lo, t0, 0, elem_count_half);
2149
2150 /*
2151 * Spread the bits into their respective elements.
2152 * E.g. for bytes:
2153 * 00000000000000000000000000000000000000000000000000000000abcdefgh
2154 * << 32 - 4
2155 * 0000000000000000000000000000abcdefgh0000000000000000000000000000
2156 * |
2157 * 0000000000000000000000000000abcdefgh00000000000000000000abcdefgh
2158 * << 16 - 2
2159 * 00000000000000abcdefgh00000000000000000000abcdefgh00000000000000
2160 * |
2161 * 00000000000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh
2162 * << 8 - 1
2163 * 0000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh0000000
2164 * |
2165 * 0000000abcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgh
2166 * & dup(1)
2167 * 0000000a0000000b0000000c0000000d0000000e0000000f0000000g0000000h
2168 * * 0xff
2169 * aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh
2170 */
2171 for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
2172 tcg_gen_shli_i64(t0, hi, j - i);
2173 tcg_gen_shli_i64(t1, lo, j - i);
2174 tcg_gen_or_i64(hi, hi, t0);
2175 tcg_gen_or_i64(lo, lo, t1);
2176 }
2177
2178 c = dup_const(vece, 1);
2179 tcg_gen_andi_i64(hi, hi, c);
2180 tcg_gen_andi_i64(lo, lo, c);
2181
2182 c = MAKE_64BIT_MASK(0, elem_width);
2183 tcg_gen_muli_i64(hi, hi, c);
2184 tcg_gen_muli_i64(lo, lo, c);
2185
2186 set_avr64(a->vrt, lo, false);
2187 set_avr64(a->vrt, hi, true);
2188 return true;
2189 }
2190
2191 TRANS(MTVSRBM, do_mtvsrm, MO_8)
2192 TRANS(MTVSRHM, do_mtvsrm, MO_16)
2193 TRANS(MTVSRWM, do_mtvsrm, MO_32)
2194 TRANS(MTVSRDM, do_mtvsrm, MO_64)
2195
2196 static bool trans_MTVSRQM(DisasContext *ctx, arg_VX_tb *a)
2197 {
2198 TCGv_i64 tmp;
2199
2200 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2201 REQUIRE_VECTOR(ctx);
2202
2203 tmp = tcg_temp_new_i64();
2204
2205 tcg_gen_ext_tl_i64(tmp, cpu_gpr[a->vrb]);
2206 tcg_gen_sextract_i64(tmp, tmp, 0, 1);
2207 set_avr64(a->vrt, tmp, false);
2208 set_avr64(a->vrt, tmp, true);
2209 return true;
2210 }
2211
2212 static bool trans_MTVSRBMI(DisasContext *ctx, arg_DX_b *a)
2213 {
2214 const uint64_t mask = dup_const(MO_8, 1);
2215 uint64_t hi, lo;
2216
2217 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2218 REQUIRE_VECTOR(ctx);
2219
2220 hi = extract16(a->b, 8, 8);
2221 lo = extract16(a->b, 0, 8);
2222
2223 for (int i = 4, j = 32; i > 0; i >>= 1, j >>= 1) {
2224 hi |= hi << (j - i);
2225 lo |= lo << (j - i);
2226 }
2227
2228 hi = (hi & mask) * 0xFF;
2229 lo = (lo & mask) * 0xFF;
2230
2231 set_avr64(a->vrt, tcg_constant_i64(hi), true);
2232 set_avr64(a->vrt, tcg_constant_i64(lo), false);
2233
2234 return true;
2235 }
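
/*
 * The same spread for one byte-sized half, as plain host C (illustrative,
 * not part of QEMU): the shift-or sequence interleaves the eight mask bits
 * one per byte lane, the AND with dup_const(MO_8, 1) isolates them, and the
 * multiply by 0xFF replicates each bit across its lane.
 */
#if 0
#include <stdint.h>

static uint64_t spread_byte_mask(uint8_t b)
{
    uint64_t x = b;

    x |= x << 28;
    x |= x << 14;
    x |= x << 7;
    x &= 0x0101010101010101ULL;     /* dup_const(MO_8, 1) */
    return x * 0xFF;                /* replicate each bit across its byte */
}
#endif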
2236
2237 static bool do_vcntmb(DisasContext *ctx, arg_VX_mp *a, int vece)
2238 {
2239 TCGv_i64 rt, vrb, mask;

REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VECTOR(ctx);

2240 rt = tcg_const_i64(0);
2241 vrb = tcg_temp_new_i64();
2242 mask = tcg_constant_i64(dup_const(vece, 1ULL << ((8 << vece) - 1)));
2243
2244 for (int i = 0; i < 2; i++) {
2245 get_avr64(vrb, a->vrb, i);
2246 if (a->mp) {
2247 tcg_gen_and_i64(vrb, mask, vrb);
2248 } else {
2249 tcg_gen_andc_i64(vrb, mask, vrb);
2250 }
2251 tcg_gen_ctpop_i64(vrb, vrb);
2252 tcg_gen_add_i64(rt, rt, vrb);
2253 }
2254
2255 tcg_gen_shli_i64(rt, rt, TARGET_LONG_BITS - 8 + vece);
2256 tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], rt);
2257 return true;
2258 }
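
/*
 * Host-side model of the byte case (VCNTMBB), illustrative only: count the
 * elements whose most significant bit equals MP, then left-justify the
 * count exactly as the final shift above does (64 - 8 + MO_8 = 56 on a
 * 64-bit target).
 */
#if 0
#include <stdint.h>

static uint64_t vcntmbb_model(const uint64_t vrb[2], int mp)
{
    uint64_t cnt = 0;

    for (int dw = 0; dw < 2; dw++) {
        for (int i = 0; i < 8; i++) {
            cnt += ((vrb[dw] >> (8 * i + 7)) & 1) == (uint64_t)mp;
        }
    }
    return cnt << 56;
}
#endif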
2259
2260 TRANS(VCNTMBB, do_vcntmb, MO_8)
2261 TRANS(VCNTMBH, do_vcntmb, MO_16)
2262 TRANS(VCNTMBW, do_vcntmb, MO_32)
2263 TRANS(VCNTMBD, do_vcntmb, MO_64)
2264
2265 static bool do_vstri(DisasContext *ctx, arg_VX_tb_rc *a,
2266 void (*gen_helper)(TCGv_i32, TCGv_ptr, TCGv_ptr))
2267 {
2268 TCGv_ptr vrt, vrb;
2269
2270 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2271 REQUIRE_VECTOR(ctx);
2272
2273 vrt = gen_avr_ptr(a->vrt);
2274 vrb = gen_avr_ptr(a->vrb);
2275
2276 if (a->rc) {
2277 gen_helper(cpu_crf[6], vrt, vrb);
2278 } else {
2279 TCGv_i32 discard = tcg_temp_new_i32();
2280 gen_helper(discard, vrt, vrb);
2281 }
2282 return true;
2283 }
2284
2285 TRANS(VSTRIBL, do_vstri, gen_helper_VSTRIBL)
2286 TRANS(VSTRIBR, do_vstri, gen_helper_VSTRIBR)
2287 TRANS(VSTRIHL, do_vstri, gen_helper_VSTRIHL)
2288 TRANS(VSTRIHR, do_vstri, gen_helper_VSTRIHR)
2289
2290 static bool do_vclrb(DisasContext *ctx, arg_VX *a, bool right)
2291 {
2292 TCGv_i64 rb, mh, ml, tmp,
2293 ones = tcg_constant_i64(-1),
2294 zero = tcg_constant_i64(0);
2295 
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VECTOR(ctx);

2296 rb = tcg_temp_new_i64();
2297 mh = tcg_temp_new_i64();
2298 ml = tcg_temp_new_i64();
2299 tmp = tcg_temp_new_i64();
2300
2301 tcg_gen_extu_tl_i64(rb, cpu_gpr[a->vrb]);
2302 tcg_gen_andi_i64(tmp, rb, 7);
2303 tcg_gen_shli_i64(tmp, tmp, 3);
2304 if (right) {
2305 tcg_gen_shr_i64(tmp, ones, tmp);
2306 } else {
2307 tcg_gen_shl_i64(tmp, ones, tmp);
2308 }
2309 tcg_gen_not_i64(tmp, tmp);
2310
2311 if (right) {
2312 tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(8),
2313 tmp, ones);
2314 tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(8),
2315 zero, tmp);
2316 tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(16),
2317 ml, ones);
2318 } else {
2319 tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(8),
2320 tmp, ones);
2321 tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(8),
2322 zero, tmp);
2323 tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(16),
2324 mh, ones);
2325 }
2326
2327 get_avr64(tmp, a->vra, true);
2328 tcg_gen_and_i64(tmp, tmp, mh);
2329 set_avr64(a->vrt, tmp, true);
2330
2331 get_avr64(tmp, a->vra, false);
2332 tcg_gen_and_i64(tmp, tmp, ml);
2333 set_avr64(a->vrt, tmp, false);
2334 return true;
2335 }
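
/*
 * A branchy host-side model of the masks built above for VCLRLB
 * (illustrative, not part of QEMU): n is the byte count taken from GPR[RB];
 * n >= 16 preserves the whole register, smaller n keeps only the n least
 * significant bytes. The TCG version computes the same masks with movconds
 * instead of branches.
 */
#if 0
#include <stdint.h>

static void vclrlb_model(uint64_t vrt[2], const uint64_t vra[2], uint64_t n)
{
    uint64_t part = ~(~0ULL << ((n % 8) * 8));  /* low n % 8 bytes */
    uint64_t mh, ml;

    if (n < 8) {
        mh = 0;
        ml = part;
    } else if (n < 16) {
        mh = part;
        ml = ~0ULL;
    } else {
        mh = ml = ~0ULL;
    }
    vrt[1] = vra[1] & mh;   /* index 1 = most significant doubleword */
    vrt[0] = vra[0] & ml;
}
#endif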
2336
2337 TRANS(VCLRLB, do_vclrb, false)
2338 TRANS(VCLRRB, do_vclrb, true)
2339
2340 #define GEN_VAFORM_PAIRED(name0, name1, opc2) \
2341 static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
2342 { \
2343 TCGv_ptr ra, rb, rc, rd; \
2344 if (unlikely(!ctx->altivec_enabled)) { \
2345 gen_exception(ctx, POWERPC_EXCP_VPU); \
2346 return; \
2347 } \
2348 ra = gen_avr_ptr(rA(ctx->opcode)); \
2349 rb = gen_avr_ptr(rB(ctx->opcode)); \
2350 rc = gen_avr_ptr(rC(ctx->opcode)); \
2351 rd = gen_avr_ptr(rD(ctx->opcode)); \
2352 if (Rc(ctx->opcode)) { \
2353 gen_helper_##name1(cpu_env, rd, ra, rb, rc); \
2354 } else { \
2355 gen_helper_##name0(cpu_env, rd, ra, rb, rc); \
2356 } \
2357 }
2358
2359 GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)
2360
2361 static bool do_va_helper(DisasContext *ctx, arg_VA *a,
2362 void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
2363 {
2364 TCGv_ptr vrt, vra, vrb, vrc;
2365 REQUIRE_VECTOR(ctx);
2366
2367 vrt = gen_avr_ptr(a->vrt);
2368 vra = gen_avr_ptr(a->vra);
2369 vrb = gen_avr_ptr(a->vrb);
2370 vrc = gen_avr_ptr(a->rc);
2371 gen_helper(vrt, vra, vrb, vrc);
2372 return true;
2373 }
2374
2375 TRANS_FLAGS2(ALTIVEC_207, VADDECUQ, do_va_helper, gen_helper_VADDECUQ)
2376 TRANS_FLAGS2(ALTIVEC_207, VADDEUQM, do_va_helper, gen_helper_VADDEUQM)
2377
2378 TRANS_FLAGS2(ALTIVEC_207, VSUBEUQM, do_va_helper, gen_helper_VSUBEUQM)
2379 TRANS_FLAGS2(ALTIVEC_207, VSUBECUQ, do_va_helper, gen_helper_VSUBECUQ)
2380
2381 TRANS_FLAGS(ALTIVEC, VPERM, do_va_helper, gen_helper_VPERM)
2382 TRANS_FLAGS2(ISA300, VPERMR, do_va_helper, gen_helper_VPERMR)
2383
2384 static void gen_vmladduhm_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
2385 TCGv_vec c)
2386 {
2387 tcg_gen_mul_vec(vece, t, a, b);
2388 tcg_gen_add_vec(vece, t, t, c);
2389 }
2390
2391 static bool trans_VMLADDUHM(DisasContext *ctx, arg_VA *a)
2392 {
2393 static const TCGOpcode vecop_list[] = {
2394 INDEX_op_add_vec, INDEX_op_mul_vec, 0
2395 };
2396
2397 static const GVecGen4 op = {
2398 .fno = gen_helper_VMLADDUHM,
2399 .fniv = gen_vmladduhm_vec,
2400 .opt_opc = vecop_list,
2401 .vece = MO_16
2402 };
2403
2404 REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
2405 REQUIRE_VECTOR(ctx);
2406
2407 tcg_gen_gvec_4(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2408 avr_full_offset(a->vrb), avr_full_offset(a->rc),
2409 16, 16, &op);
2410
2411 return true;
2412 }
2413
2414 static bool trans_VSEL(DisasContext *ctx, arg_VA *a)
2415 {
2416 REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
2417 REQUIRE_VECTOR(ctx);
2418
2419 tcg_gen_gvec_bitsel(MO_64, avr_full_offset(a->vrt), avr_full_offset(a->rc),
2420 avr_full_offset(a->vrb), avr_full_offset(a->vra),
2421 16, 16);
2422
2423 return true;
2424 }
2425
2426 TRANS_FLAGS(ALTIVEC, VMSUMUBM, do_va_helper, gen_helper_VMSUMUBM)
2427 TRANS_FLAGS(ALTIVEC, VMSUMMBM, do_va_helper, gen_helper_VMSUMMBM)
2428 TRANS_FLAGS(ALTIVEC, VMSUMSHM, do_va_helper, gen_helper_VMSUMSHM)
2429 TRANS_FLAGS(ALTIVEC, VMSUMUHM, do_va_helper, gen_helper_VMSUMUHM)
2430
2431 static bool do_va_env_helper(DisasContext *ctx, arg_VA *a,
2432 void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
2433 {
2434 TCGv_ptr vrt, vra, vrb, vrc;
2435 REQUIRE_VECTOR(ctx);
2436
2437 vrt = gen_avr_ptr(a->vrt);
2438 vra = gen_avr_ptr(a->vra);
2439 vrb = gen_avr_ptr(a->vrb);
2440 vrc = gen_avr_ptr(a->rc);
2441 gen_helper(cpu_env, vrt, vra, vrb, vrc);
2442 return true;
2443 }
2444
2445 TRANS_FLAGS(ALTIVEC, VMSUMUHS, do_va_env_helper, gen_helper_VMSUMUHS)
2446 TRANS_FLAGS(ALTIVEC, VMSUMSHS, do_va_env_helper, gen_helper_VMSUMSHS)
2447
2448 TRANS_FLAGS(ALTIVEC, VMHADDSHS, do_va_env_helper, gen_helper_VMHADDSHS)
2449 TRANS_FLAGS(ALTIVEC, VMHRADDSHS, do_va_env_helper, gen_helper_VMHRADDSHS)
2450
2451 GEN_VXFORM_NOA(vclzb, 1, 28)
2452 GEN_VXFORM_NOA(vclzh, 1, 29)
2453 GEN_VXFORM_TRANS(vclzw, 1, 30)
2454 GEN_VXFORM_TRANS(vclzd, 1, 31)
2455
2456 static bool do_vneg(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
2457 {
2458 REQUIRE_INSNS_FLAGS2(ctx, ISA300);
2459 REQUIRE_VECTOR(ctx);
2460
2461 tcg_gen_gvec_neg(vece, avr_full_offset(a->vrt), avr_full_offset(a->vrb),
2462 16, 16);
2463 return true;
2464 }
2465
2466 TRANS(VNEGW, do_vneg, MO_32)
2467 TRANS(VNEGD, do_vneg, MO_64)
2468
2469 static void gen_vexts_i64(TCGv_i64 t, TCGv_i64 b, int64_t s)
2470 {
2471 tcg_gen_sextract_i64(t, b, 0, 64 - s);
2472 }
2473
2474 static void gen_vexts_i32(TCGv_i32 t, TCGv_i32 b, int32_t s)
2475 {
2476 tcg_gen_sextract_i32(t, b, 0, 32 - s);
2477 }
2478
2479 static void gen_vexts_vec(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t s)
2480 {
2481 tcg_gen_shli_vec(vece, t, b, s);
2482 tcg_gen_sari_vec(vece, t, t, s);
2483 }
2484
2485 static bool do_vexts(DisasContext *ctx, arg_VX_tb *a, unsigned vece, int64_t s)
2486 {
2487 static const TCGOpcode vecop_list[] = {
2488 INDEX_op_shli_vec, INDEX_op_sari_vec, 0
2489 };
2490
2491 static const GVecGen2i op[2] = {
2492 {
2493 .fni4 = gen_vexts_i32,
2494 .fniv = gen_vexts_vec,
2495 .opt_opc = vecop_list,
2496 .vece = MO_32
2497 },
2498 {
2499 .fni8 = gen_vexts_i64,
2500 .fniv = gen_vexts_vec,
2501 .opt_opc = vecop_list,
2502 .vece = MO_64
2503 },
2504 };
2505
2506 REQUIRE_INSNS_FLAGS2(ctx, ISA300);
2507 REQUIRE_VECTOR(ctx);
2508
2509 tcg_gen_gvec_2i(avr_full_offset(a->vrt), avr_full_offset(a->vrb),
2510 16, 16, s, &op[vece - MO_32]);
2511
2512 return true;
2513 }
2514
2515 TRANS(VEXTSB2W, do_vexts, MO_32, 24);
2516 TRANS(VEXTSH2W, do_vexts, MO_32, 16);
2517 TRANS(VEXTSB2D, do_vexts, MO_64, 56);
2518 TRANS(VEXTSH2D, do_vexts, MO_64, 48);
2519 TRANS(VEXTSW2D, do_vexts, MO_64, 32);
2520
2521 static bool trans_VEXTSD2Q(DisasContext *ctx, arg_VX_tb *a)
2522 {
2523 TCGv_i64 tmp;
2524
2525 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2526 REQUIRE_VECTOR(ctx);
2527
2528 tmp = tcg_temp_new_i64();
2529
2530 get_avr64(tmp, a->vrb, false);
2531 set_avr64(a->vrt, tmp, false);
2532 tcg_gen_sari_i64(tmp, tmp, 63);
2533 set_avr64(a->vrt, tmp, true);
2534 return true;
2535 }
2536
2537 GEN_VXFORM_NOA_2(vctzb, 1, 24, 28)
2538 GEN_VXFORM_NOA_2(vctzh, 1, 24, 29)
2539 GEN_VXFORM_NOA_2(vctzw, 1, 24, 30)
2540 GEN_VXFORM_NOA_2(vctzd, 1, 24, 31)
2541 GEN_VXFORM_NOA_3(vclzlsbb, 1, 24, 0)
2542 GEN_VXFORM_NOA_3(vctzlsbb, 1, 24, 1)
2543 GEN_VXFORM_NOA(vpopcntb, 1, 28)
2544 GEN_VXFORM_NOA(vpopcnth, 1, 29)
2545 GEN_VXFORM_NOA(vpopcntw, 1, 30)
2546 GEN_VXFORM_NOA(vpopcntd, 1, 31)
2547 GEN_VXFORM_DUAL(vclzb, PPC_NONE, PPC2_ALTIVEC_207, \
2548 vpopcntb, PPC_NONE, PPC2_ALTIVEC_207)
2549 GEN_VXFORM_DUAL(vclzh, PPC_NONE, PPC2_ALTIVEC_207, \
2550 vpopcnth, PPC_NONE, PPC2_ALTIVEC_207)
2551 GEN_VXFORM_DUAL(vclzw, PPC_NONE, PPC2_ALTIVEC_207, \
2552 vpopcntw, PPC_NONE, PPC2_ALTIVEC_207)
2553 GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \
2554 vpopcntd, PPC_NONE, PPC2_ALTIVEC_207)
2555 GEN_VXFORM(vbpermd, 6, 23);
2556 GEN_VXFORM(vbpermq, 6, 21);
2557 GEN_VXFORM_TRANS(vgbbd, 6, 20);
2558 GEN_VXFORM(vpmsumb, 4, 16)
2559 GEN_VXFORM(vpmsumh, 4, 17)
2560 GEN_VXFORM(vpmsumw, 4, 18)
2561
2562 #define GEN_BCD(op) \
2563 static void gen_##op(DisasContext *ctx) \
2564 { \
2565 TCGv_ptr ra, rb, rd; \
2566 TCGv_i32 ps; \
2567 \
2568 if (unlikely(!ctx->altivec_enabled)) { \
2569 gen_exception(ctx, POWERPC_EXCP_VPU); \
2570 return; \
2571 } \
2572 \
2573 ra = gen_avr_ptr(rA(ctx->opcode)); \
2574 rb = gen_avr_ptr(rB(ctx->opcode)); \
2575 rd = gen_avr_ptr(rD(ctx->opcode)); \
2576 \
2577 ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
2578 \
2579 gen_helper_##op(cpu_crf[6], rd, ra, rb, ps); \
2580 }
2581
2582 #define GEN_BCD2(op) \
2583 static void gen_##op(DisasContext *ctx) \
2584 { \
2585 TCGv_ptr rd, rb; \
2586 TCGv_i32 ps; \
2587 \
2588 if (unlikely(!ctx->altivec_enabled)) { \
2589 gen_exception(ctx, POWERPC_EXCP_VPU); \
2590 return; \
2591 } \
2592 \
2593 rb = gen_avr_ptr(rB(ctx->opcode)); \
2594 rd = gen_avr_ptr(rD(ctx->opcode)); \
2595 \
2596 ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
2597 \
2598 gen_helper_##op(cpu_crf[6], rd, rb, ps); \
2599 }
2600
2601 GEN_BCD(bcdadd)
2602 GEN_BCD(bcdsub)
2603 GEN_BCD2(bcdcfn)
2604 GEN_BCD2(bcdctn)
2605 GEN_BCD2(bcdcfz)
2606 GEN_BCD2(bcdctz)
2607 GEN_BCD2(bcdcfsq)
2608 GEN_BCD2(bcdctsq)
2609 GEN_BCD2(bcdsetsgn)
2610 GEN_BCD(bcdcpsgn);
2611 GEN_BCD(bcds);
2612 GEN_BCD(bcdus);
2613 GEN_BCD(bcdsr);
2614 GEN_BCD(bcdtrunc);
2615 GEN_BCD(bcdutrunc);
2616
2617 static void gen_xpnd04_1(DisasContext *ctx)
2618 {
2619 switch (opc4(ctx->opcode)) {
2620 case 0:
2621 gen_bcdctsq(ctx);
2622 break;
2623 case 2:
2624 gen_bcdcfsq(ctx);
2625 break;
2626 case 4:
2627 gen_bcdctz(ctx);
2628 break;
2629 case 5:
2630 gen_bcdctn(ctx);
2631 break;
2632 case 6:
2633 gen_bcdcfz(ctx);
2634 break;
2635 case 7:
2636 gen_bcdcfn(ctx);
2637 break;
2638 case 31:
2639 gen_bcdsetsgn(ctx);
2640 break;
2641 default:
2642 gen_invalid(ctx);
2643 break;
2644 }
2645 }
2646
2647 static void gen_xpnd04_2(DisasContext *ctx)
2648 {
2649 switch (opc4(ctx->opcode)) {
2650 case 0:
2651 gen_bcdctsq(ctx);
2652 break;
2653 case 2:
2654 gen_bcdcfsq(ctx);
2655 break;
2656 case 4:
2657 gen_bcdctz(ctx);
2658 break;
2659 case 6:
2660 gen_bcdcfz(ctx);
2661 break;
2662 case 7:
2663 gen_bcdcfn(ctx);
2664 break;
2665 case 31:
2666 gen_bcdsetsgn(ctx);
2667 break;
2668 default:
2669 gen_invalid(ctx);
2670 break;
2671 }
2672 }
2673 
2675 GEN_VXFORM_DUAL(vsubsws, PPC_ALTIVEC, PPC_NONE, \
2676 xpnd04_2, PPC_NONE, PPC2_ISA300)
2677
2678 GEN_VXFORM_DUAL(vsububm, PPC_ALTIVEC, PPC_NONE, \
2679 bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
2680 GEN_VXFORM_DUAL(vsububs, PPC_ALTIVEC, PPC_NONE, \
2681 bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
2682 GEN_VXFORM_DUAL(vsubuhm, PPC_ALTIVEC, PPC_NONE, \
2683 bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
2684 GEN_VXFORM_DUAL(vsubuhs, PPC_ALTIVEC, PPC_NONE, \
2685 bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
2686 GEN_VXFORM_DUAL(vaddshs, PPC_ALTIVEC, PPC_NONE, \
2687 bcdcpsgn, PPC_NONE, PPC2_ISA300)
2688 GEN_VXFORM_DUAL(vsubudm, PPC2_ALTIVEC_207, PPC_NONE, \
2689 bcds, PPC_NONE, PPC2_ISA300)
2690 GEN_VXFORM_DUAL(vsubuwm, PPC_ALTIVEC, PPC_NONE, \
2691 bcdus, PPC_NONE, PPC2_ISA300)
2692 GEN_VXFORM_DUAL(vsubsbs, PPC_ALTIVEC, PPC_NONE, \
2693 bcdtrunc, PPC_NONE, PPC2_ISA300)
2694
2695 static void gen_vsbox(DisasContext *ctx)
2696 {
2697 TCGv_ptr ra, rd;
2698 if (unlikely(!ctx->altivec_enabled)) {
2699 gen_exception(ctx, POWERPC_EXCP_VPU);
2700 return;
2701 }
2702 ra = gen_avr_ptr(rA(ctx->opcode));
2703 rd = gen_avr_ptr(rD(ctx->opcode));
2704 gen_helper_vsbox(rd, ra);
2705 }
2706
2707 GEN_VXFORM(vcipher, 4, 20)
2708 GEN_VXFORM(vcipherlast, 4, 20)
2709 GEN_VXFORM(vncipher, 4, 21)
2710 GEN_VXFORM(vncipherlast, 4, 21)
2711
2712 GEN_VXFORM_DUAL(vcipher, PPC_NONE, PPC2_ALTIVEC_207,
2713 vcipherlast, PPC_NONE, PPC2_ALTIVEC_207)
2714 GEN_VXFORM_DUAL(vncipher, PPC_NONE, PPC2_ALTIVEC_207,
2715 vncipherlast, PPC_NONE, PPC2_ALTIVEC_207)
2716
2717 #define VSHASIGMA(op) \
2718 static void gen_##op(DisasContext *ctx) \
2719 { \
2720 TCGv_ptr ra, rd; \
2721 TCGv_i32 st_six; \
2722 if (unlikely(!ctx->altivec_enabled)) { \
2723 gen_exception(ctx, POWERPC_EXCP_VPU); \
2724 return; \
2725 } \
2726 ra = gen_avr_ptr(rA(ctx->opcode)); \
2727 rd = gen_avr_ptr(rD(ctx->opcode)); \
2728 st_six = tcg_const_i32(rB(ctx->opcode)); \
2729 gen_helper_##op(rd, ra, st_six); \
2730 }
2731
2732 VSHASIGMA(vshasigmaw)
2733 VSHASIGMA(vshasigmad)
2734
2735 GEN_VXFORM3(vpermxor, 22, 0xFF)
2736 GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
2737 vpermxor, PPC_NONE, PPC2_ALTIVEC_207)
2738
2739 static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a)
2740 {
2741 static const GVecGen3 g = {
2742 .fni8 = gen_helper_CFUGED,
2743 .vece = MO_64,
2744 };
2745
2746 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2747 REQUIRE_VECTOR(ctx);
2748
2749 tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2750 avr_full_offset(a->vrb), 16, 16, &g);
2751
2752 return true;
2753 }
2754
2755 static bool trans_VCLZDM(DisasContext *ctx, arg_VX *a)
2756 {
2757 static const GVecGen3i g = {
2758 .fni8 = do_cntzdm,
2759 .vece = MO_64,
2760 };
2761
2762 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2763 REQUIRE_VECTOR(ctx);
2764
2765 tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2766 avr_full_offset(a->vrb), 16, 16, false, &g);
2767
2768 return true;
2769 }
2770
2771 static bool trans_VCTZDM(DisasContext *ctx, arg_VX *a)
2772 {
2773 static const GVecGen3i g = {
2774 .fni8 = do_cntzdm,
2775 .vece = MO_64,
2776 };
2777
2778 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2779 REQUIRE_VECTOR(ctx);
2780
2781 tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2782 avr_full_offset(a->vrb), 16, 16, true, &g);
2783
2784 return true;
2785 }
2786
2787 static bool trans_VPDEPD(DisasContext *ctx, arg_VX *a)
2788 {
2789 static const GVecGen3 g = {
2790 .fni8 = gen_helper_PDEPD,
2791 .vece = MO_64,
2792 };
2793
2794 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2795 REQUIRE_VECTOR(ctx);
2796
2797 tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2798 avr_full_offset(a->vrb), 16, 16, &g);
2799
2800 return true;
2801 }
2802
2803 static bool trans_VPEXTD(DisasContext *ctx, arg_VX *a)
2804 {
2805 static const GVecGen3 g = {
2806 .fni8 = gen_helper_PEXTD,
2807 .vece = MO_64,
2808 };
2809
2810 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2811 REQUIRE_VECTOR(ctx);
2812
2813 tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2814 avr_full_offset(a->vrb), 16, 16, &g);
2815
2816 return true;
2817 }
2818
2819 static bool trans_VMSUMUDM(DisasContext *ctx, arg_VA *a)
2820 {
2821 TCGv_i64 rl, rh, src1, src2;
2822 int dw;
2823
2824 REQUIRE_INSNS_FLAGS2(ctx, ISA300);
2825 REQUIRE_VECTOR(ctx);
2826
2827 rh = tcg_temp_new_i64();
2828 rl = tcg_temp_new_i64();
2829 src1 = tcg_temp_new_i64();
2830 src2 = tcg_temp_new_i64();
2831
2832 get_avr64(rl, a->rc, false);
2833 get_avr64(rh, a->rc, true);
2834
2835 for (dw = 0; dw < 2; dw++) {
2836 get_avr64(src1, a->vra, dw);
2837 get_avr64(src2, a->vrb, dw);
2838 tcg_gen_mulu2_i64(src1, src2, src1, src2);
2839 tcg_gen_add2_i64(rl, rh, rl, rh, src1, src2);
2840 }
2841
2842 set_avr64(a->vrt, rl, false);
2843 set_avr64(a->vrt, rh, true);
2844 return true;
2845 }
2846
2847 static bool trans_VMSUMCUD(DisasContext *ctx, arg_VA *a)
2848 {
2849 TCGv_i64 tmp0, tmp1, prod1h, prod1l, prod0h, prod0l, zero;
2850
2851 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2852 REQUIRE_VECTOR(ctx);
2853
2854 tmp0 = tcg_temp_new_i64();
2855 tmp1 = tcg_temp_new_i64();
2856 prod1h = tcg_temp_new_i64();
2857 prod1l = tcg_temp_new_i64();
2858 prod0h = tcg_temp_new_i64();
2859 prod0l = tcg_temp_new_i64();
2860 zero = tcg_constant_i64(0);
2861
2862 /* prod1 = vsr[vra+32].dw[1] * vsr[vrb+32].dw[1] */
2863 get_avr64(tmp0, a->vra, false);
2864 get_avr64(tmp1, a->vrb, false);
2865 tcg_gen_mulu2_i64(prod1l, prod1h, tmp0, tmp1);
2866
2867 /* prod0 = vsr[vra+32].dw[0] * vsr[vrb+32].dw[0] */
2868 get_avr64(tmp0, a->vra, true);
2869 get_avr64(tmp1, a->vrb, true);
2870 tcg_gen_mulu2_i64(prod0l, prod0h, tmp0, tmp1);
2871
2872 /* Sum the lower 64-bit elements */
2873 get_avr64(tmp1, a->rc, false);
2874 tcg_gen_add2_i64(tmp1, tmp0, tmp1, zero, prod1l, zero);
2875 tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod0l, zero);
2876
2877 /*
2878 * Discard the lower 64 bits, keeping only the carry into bit 64.
2879 * Then sum the higher 64-bit elements.
2880 */
2881 get_avr64(tmp1, a->rc, true);
2882 tcg_gen_add2_i64(tmp1, tmp0, tmp0, zero, tmp1, zero);
2883 tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod1h, zero);
2884 tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod0h, zero);
2885
2886 /* Discard 64 more bits to complete the CHOP128(temp >> 128) */
2887 set_avr64(a->vrt, tmp0, false);
2888 set_avr64(a->vrt, zero, true);
2889 return true;
2890 }
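
/*
 * An unsigned __int128 model of the sum above (illustrative, assuming the
 * host compiler provides __int128): VMSUMCUD returns how much carries out
 * above bit 127 of prod0 + prod1 + vrc.
 */
#if 0
#include <stdint.h>

static void vmsumcud_model(uint64_t vrt[2], const uint64_t vra[2],
                           const uint64_t vrb[2], const uint64_t vrc[2])
{
    unsigned __int128 p0 = (unsigned __int128)vra[1] * vrb[1];
    unsigned __int128 p1 = (unsigned __int128)vra[0] * vrb[0];
    unsigned __int128 lo, hi;

    /* add the three low doublewords, then the three high ones + carries */
    lo = ((unsigned __int128)(uint64_t)p0) + (uint64_t)p1 + vrc[0];
    hi = (p0 >> 64) + (p1 >> 64) + vrc[1] + (lo >> 64);

    vrt[0] = (uint64_t)(hi >> 64);  /* CHOP128(temp >> 128) */
    vrt[1] = 0;
}
#endif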
2891
2892 static bool do_vx_helper(DisasContext *ctx, arg_VX *a,
2893 void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr))
2894 {
2895 TCGv_ptr ra, rb, rd;
2896 REQUIRE_VECTOR(ctx);
2897
2898 ra = gen_avr_ptr(a->vra);
2899 rb = gen_avr_ptr(a->vrb);
2900 rd = gen_avr_ptr(a->vrt);
2901 gen_helper(rd, ra, rb);
2902 return true;
2903 }
2904
2905 TRANS_FLAGS2(ALTIVEC_207, VADDCUQ, do_vx_helper, gen_helper_VADDCUQ)
2906 TRANS_FLAGS2(ALTIVEC_207, VADDUQM, do_vx_helper, gen_helper_VADDUQM)
2907
2908 TRANS_FLAGS2(ALTIVEC_207, VPMSUMD, do_vx_helper, gen_helper_VPMSUMD)
2909
2910 TRANS_FLAGS2(ALTIVEC_207, VSUBCUQ, do_vx_helper, gen_helper_VSUBCUQ)
2911 TRANS_FLAGS2(ALTIVEC_207, VSUBUQM, do_vx_helper, gen_helper_VSUBUQM)
2912
2913 static void gen_VADDCUW_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
2914 {
2915 tcg_gen_not_vec(vece, a, a);
2916 tcg_gen_cmp_vec(TCG_COND_LTU, vece, t, a, b);
2917 tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(t, vece, 1));
2918 }
2919
2920 static void gen_VADDCUW_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
2921 {
2922 tcg_gen_not_i32(a, a);
2923 tcg_gen_setcond_i32(TCG_COND_LTU, t, a, b);
2924 }
2925
2926 static void gen_VSUBCUW_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
2927 {
2928 tcg_gen_cmp_vec(TCG_COND_GEU, vece, t, a, b);
2929 tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(t, vece, 1));
2930 }
2931
2932 static void gen_VSUBCUW_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
2933 {
2934 tcg_gen_setcond_i32(TCG_COND_GEU, t, a, b);
2935 }
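
/*
 * The identities behind the two helpers, in plain host C (illustrative):
 * a + b carries out iff b exceeds the headroom left by a, i.e. ~a < b,
 * and a - b does not borrow iff a >= b.
 */
#if 0
#include <stdint.h>

static uint32_t vaddcuw_lane(uint32_t a, uint32_t b)
{
    return ~a < b;      /* carry-out of a + b */
}

static uint32_t vsubcuw_lane(uint32_t a, uint32_t b)
{
    return a >= b;      /* "not borrow" of a - b */
}
#endif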
2936
2937 static bool do_vx_vaddsubcuw(DisasContext *ctx, arg_VX *a, int add)
2938 {
2939 static const TCGOpcode vecop_list[] = {
2940 INDEX_op_cmp_vec, 0
2941 };
2942
2943 static const GVecGen3 op[] = {
2944 {
2945 .fniv = gen_VSUBCUW_vec,
2946 .fni4 = gen_VSUBCUW_i32,
2947 .opt_opc = vecop_list,
2948 .vece = MO_32
2949 },
2950 {
2951 .fniv = gen_VADDCUW_vec,
2952 .fni4 = gen_VADDCUW_i32,
2953 .opt_opc = vecop_list,
2954 .vece = MO_32
2955 },
2956 };
2957
2958 REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
2959 REQUIRE_VECTOR(ctx);
2960
2961 tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2962 avr_full_offset(a->vrb), 16, 16, &op[add]);
2963
2964 return true;
2965 }
2966
2967 TRANS(VSUBCUW, do_vx_vaddsubcuw, 0)
2968 TRANS(VADDCUW, do_vx_vaddsubcuw, 1)
2969
2970 static bool do_vx_vmuleo(DisasContext *ctx, arg_VX *a, bool even,
2971 void (*gen_mul)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
2972 {
2973 TCGv_i64 vra, vrb, vrt0, vrt1;
2974 REQUIRE_VECTOR(ctx);
2975
2976 vra = tcg_temp_new_i64();
2977 vrb = tcg_temp_new_i64();
2978 vrt0 = tcg_temp_new_i64();
2979 vrt1 = tcg_temp_new_i64();
2980
2981 get_avr64(vra, a->vra, even);
2982 get_avr64(vrb, a->vrb, even);
2983 gen_mul(vrt0, vrt1, vra, vrb);
2984 set_avr64(a->vrt, vrt0, false);
2985 set_avr64(a->vrt, vrt1, true);
2986 return true;
2987 }
2988
2989 static bool trans_VMULLD(DisasContext *ctx, arg_VX *a)
2990 {
2991 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2992 REQUIRE_VECTOR(ctx);
2993
2994 tcg_gen_gvec_mul(MO_64, avr_full_offset(a->vrt), avr_full_offset(a->vra),
2995 avr_full_offset(a->vrb), 16, 16);
2996
2997 return true;
2998 }
2999
3000 TRANS_FLAGS(ALTIVEC, VMULESB, do_vx_helper, gen_helper_VMULESB)
3001 TRANS_FLAGS(ALTIVEC, VMULOSB, do_vx_helper, gen_helper_VMULOSB)
3002 TRANS_FLAGS(ALTIVEC, VMULEUB, do_vx_helper, gen_helper_VMULEUB)
3003 TRANS_FLAGS(ALTIVEC, VMULOUB, do_vx_helper, gen_helper_VMULOUB)
3004 TRANS_FLAGS(ALTIVEC, VMULESH, do_vx_helper, gen_helper_VMULESH)
3005 TRANS_FLAGS(ALTIVEC, VMULOSH, do_vx_helper, gen_helper_VMULOSH)
3006 TRANS_FLAGS(ALTIVEC, VMULEUH, do_vx_helper, gen_helper_VMULEUH)
3007 TRANS_FLAGS(ALTIVEC, VMULOUH, do_vx_helper, gen_helper_VMULOUH)
3008 TRANS_FLAGS2(ALTIVEC_207, VMULESW, do_vx_helper, gen_helper_VMULESW)
3009 TRANS_FLAGS2(ALTIVEC_207, VMULOSW, do_vx_helper, gen_helper_VMULOSW)
3010 TRANS_FLAGS2(ALTIVEC_207, VMULEUW, do_vx_helper, gen_helper_VMULEUW)
3011 TRANS_FLAGS2(ALTIVEC_207, VMULOUW, do_vx_helper, gen_helper_VMULOUW)
3012 TRANS_FLAGS2(ISA310, VMULESD, do_vx_vmuleo, true , tcg_gen_muls2_i64)
3013 TRANS_FLAGS2(ISA310, VMULOSD, do_vx_vmuleo, false, tcg_gen_muls2_i64)
3014 TRANS_FLAGS2(ISA310, VMULEUD, do_vx_vmuleo, true , tcg_gen_mulu2_i64)
3015 TRANS_FLAGS2(ISA310, VMULOUD, do_vx_vmuleo, false, tcg_gen_mulu2_i64)
3016
3017 static void do_vx_vmulhw_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign)
3018 {
3019 TCGv_i64 hh, lh, temp;
3020
3021 hh = tcg_temp_new_i64();
3022 lh = tcg_temp_new_i64();
3023 temp = tcg_temp_new_i64();
3024
3025 if (sign) {
3026 tcg_gen_ext32s_i64(lh, a);
3027 tcg_gen_ext32s_i64(temp, b);
3028 } else {
3029 tcg_gen_ext32u_i64(lh, a);
3030 tcg_gen_ext32u_i64(temp, b);
3031 }
3032 tcg_gen_mul_i64(lh, lh, temp);
3033
3034 if (sign) {
3035 tcg_gen_sari_i64(hh, a, 32);
3036 tcg_gen_sari_i64(temp, b, 32);
3037 } else {
3038 tcg_gen_shri_i64(hh, a, 32);
3039 tcg_gen_shri_i64(temp, b, 32);
3040 }
3041 tcg_gen_mul_i64(hh, hh, temp);
3042
3043 tcg_gen_shri_i64(lh, lh, 32);
3044 tcg_gen_deposit_i64(t, hh, lh, 0, 32);
3045 }
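
/*
 * What the sequence above computes per doubleword, in plain host C for the
 * unsigned case (illustrative): each 32-bit lane is multiplied with its
 * counterpart and only the high 32 bits of each product are kept. The
 * signed case differs only in using sign extension and arithmetic shifts.
 */
#if 0
#include <stdint.h>

static uint64_t vmulhuw_dw(uint64_t a, uint64_t b)
{
    uint64_t lo = ((a & 0xFFFFFFFFULL) * (b & 0xFFFFFFFFULL)) >> 32;
    uint64_t hi = ((a >> 32) * (b >> 32)) & 0xFFFFFFFF00000000ULL;

    return hi | lo;     /* like the final deposit: hh[63:32] : lh[63:32] */
}
#endif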
3046
3047 static void do_vx_vmulhd_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign)
3048 {
3049 TCGv_i64 tlow;
3050
3051 tlow = tcg_temp_new_i64();
3052 if (sign) {
3053 tcg_gen_muls2_i64(tlow, t, a, b);
3054 } else {
3055 tcg_gen_mulu2_i64(tlow, t, a, b);
3056 }
3057 }
3058
3059 static bool do_vx_mulh(DisasContext *ctx, arg_VX *a, bool sign,
3060 void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, bool))
3061 {
3062 REQUIRE_INSNS_FLAGS2(ctx, ISA310);
3063 REQUIRE_VECTOR(ctx);
3064
3065 TCGv_i64 vra, vrb, vrt;
3066 int i;
3067
3068 vra = tcg_temp_new_i64();
3069 vrb = tcg_temp_new_i64();
3070 vrt = tcg_temp_new_i64();
3071
3072 for (i = 0; i < 2; i++) {
3073 get_avr64(vra, a->vra, i);
3074 get_avr64(vrb, a->vrb, i);
3075 get_avr64(vrt, a->vrt, i);
3076
3077 func(vrt, vra, vrb, sign);
3078
3079 set_avr64(a->vrt, vrt, i);
3080 }
3081 return true;
3082 }
3083
3084 TRANS(VMULHSW, do_vx_mulh, true , do_vx_vmulhw_i64)
3085 TRANS(VMULHSD, do_vx_mulh, true , do_vx_vmulhd_i64)
3086 TRANS(VMULHUW, do_vx_mulh, false, do_vx_vmulhw_i64)
3087 TRANS(VMULHUD, do_vx_mulh, false, do_vx_vmulhd_i64)
3088
3089 static void do_vavg(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
3090 void (*gen_shr_vec)(unsigned, TCGv_vec, TCGv_vec, int64_t))
3091 {
3092 TCGv_vec tmp = tcg_temp_new_vec_matching(t);
3093 tcg_gen_or_vec(vece, tmp, a, b);
3094 tcg_gen_and_vec(vece, tmp, tmp, tcg_constant_vec_matching(t, vece, 1));
3095 gen_shr_vec(vece, a, a, 1);
3096 gen_shr_vec(vece, b, b, 1);
3097 tcg_gen_add_vec(vece, t, a, b);
3098 tcg_gen_add_vec(vece, t, t, tmp);
3099 }
3100
3101 QEMU_FLATTEN
3102 static void gen_vavgu(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
3103 {
3104 do_vavg(vece, t, a, b, tcg_gen_shri_vec);
3105 }
3106
3107 QEMU_FLATTEN
3108 static void gen_vavgs(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
3109 {
3110 do_vavg(vece, t, a, b, tcg_gen_sari_vec);
3111 }
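
/*
 * The rounding trick used above, for one unsigned byte lane (illustrative):
 * (a + b + 1) >> 1 is computed as (a >> 1) + (b >> 1) + ((a | b) & 1),
 * which never overflows the element. The signed helpers use arithmetic
 * shifts; everything else is identical.
 */
#if 0
#include <stdint.h>

static uint8_t vavgub_lane(uint8_t a, uint8_t b)
{
    return (a >> 1) + (b >> 1) + ((a | b) & 1);
}
#endif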
3112
3113 static bool do_vx_vavg(DisasContext *ctx, arg_VX *a, int sign, int vece)
3114 {
3115 static const TCGOpcode vecop_list_s[] = {
3116 INDEX_op_add_vec, INDEX_op_sari_vec, 0
3117 };
3118 static const TCGOpcode vecop_list_u[] = {
3119 INDEX_op_add_vec, INDEX_op_shri_vec, 0
3120 };
3121
3122 static const GVecGen3 op[2][3] = {
3123 {
3124 {
3125 .fniv = gen_vavgu,
3126 .fno = gen_helper_VAVGUB,
3127 .opt_opc = vecop_list_u,
3128 .vece = MO_8
3129 },
3130 {
3131 .fniv = gen_vavgu,
3132 .fno = gen_helper_VAVGUH,
3133 .opt_opc = vecop_list_u,
3134 .vece = MO_16
3135 },
3136 {
3137 .fniv = gen_vavgu,
3138 .fno = gen_helper_VAVGUW,
3139 .opt_opc = vecop_list_u,
3140 .vece = MO_32
3141 },
3142 },
3143 {
3144 {
3145 .fniv = gen_vavgs,
3146 .fno = gen_helper_VAVGSB,
3147 .opt_opc = vecop_list_s,
3148 .vece = MO_8
3149 },
3150 {
3151 .fniv = gen_vavgs,
3152 .fno = gen_helper_VAVGSH,
3153 .opt_opc = vecop_list_s,
3154 .vece = MO_16
3155 },
3156 {
3157 .fniv = gen_vavgs,
3158 .fno = gen_helper_VAVGSW,
3159 .opt_opc = vecop_list_s,
3160 .vece = MO_32
3161 },
3162 },
3163 };
3164
3165 REQUIRE_VECTOR(ctx);
3166
3167 tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
3168 avr_full_offset(a->vrb), 16, 16, &op[sign][vece]);
3169 
3171 return true;
3172 }
3173 
3175 TRANS_FLAGS(ALTIVEC, VAVGSB, do_vx_vavg, 1, MO_8)
3176 TRANS_FLAGS(ALTIVEC, VAVGSH, do_vx_vavg, 1, MO_16)
3177 TRANS_FLAGS(ALTIVEC, VAVGSW, do_vx_vavg, 1, MO_32)
3178 TRANS_FLAGS(ALTIVEC, VAVGUB, do_vx_vavg, 0, MO_8)
3179 TRANS_FLAGS(ALTIVEC, VAVGUH, do_vx_vavg, 0, MO_16)
3180 TRANS_FLAGS(ALTIVEC, VAVGUW, do_vx_vavg, 0, MO_32)
3181
3182 static void gen_vabsdu(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
3183 {
3184 tcg_gen_umax_vec(vece, t, a, b);
3185 tcg_gen_umin_vec(vece, a, a, b);
3186 tcg_gen_sub_vec(vece, t, t, a);
3187 }
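
/*
 * The max/min identity used above, for one unsigned byte lane
 * (illustrative): |a - b| == umax(a, b) - umin(a, b), with no widening
 * and no branches at the vector level.
 */
#if 0
#include <stdint.h>

static uint8_t vabsdub_lane(uint8_t a, uint8_t b)
{
    uint8_t mx = a > b ? a : b;
    uint8_t mn = a < b ? a : b;

    return mx - mn;
}
#endif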
3188
3189 static bool do_vabsdu(DisasContext *ctx, arg_VX *a, const int vece)
3190 {
3191 static const TCGOpcode vecop_list[] = {
3192 INDEX_op_umax_vec, INDEX_op_umin_vec, INDEX_op_sub_vec, 0
3193 };
3194
3195 static const GVecGen3 op[] = {
3196 {
3197 .fniv = gen_vabsdu,
3198 .fno = gen_helper_VABSDUB,
3199 .opt_opc = vecop_list,
3200 .vece = MO_8
3201 },
3202 {
3203 .fniv = gen_vabsdu,
3204 .fno = gen_helper_VABSDUH,
3205 .opt_opc = vecop_list,
3206 .vece = MO_16
3207 },
3208 {
3209 .fniv = gen_vabsdu,
3210 .fno = gen_helper_VABSDUW,
3211 .opt_opc = vecop_list,
3212 .vece = MO_32
3213 },
3214 };
3215
3216 REQUIRE_VECTOR(ctx);
3217
3218 tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
3219 avr_full_offset(a->vrb), 16, 16, &op[vece]);
3220
3221 return true;
3222 }
3223
3224 TRANS_FLAGS2(ISA300, VABSDUB, do_vabsdu, MO_8)
3225 TRANS_FLAGS2(ISA300, VABSDUH, do_vabsdu, MO_16)
3226 TRANS_FLAGS2(ISA300, VABSDUW, do_vabsdu, MO_32)
3227
3228 static bool do_vdiv_vmod(DisasContext *ctx, arg_VX *a, const int vece,
3229 void (*func_32)(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b),
3230 void (*func_64)(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b))
3231 {
3232 const GVecGen3 op = {
3233 .fni4 = func_32,
3234 .fni8 = func_64,
3235 .vece = vece
3236 };
3237
3238 REQUIRE_VECTOR(ctx);
3239
3240 tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
3241 avr_full_offset(a->vrb), 16, 16, &op);
3242
3243 return true;
3244 }
3245
3246 #define DIVU32(NAME, DIV) \
3247 static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) \
3248 { \
3249 TCGv_i32 zero = tcg_constant_i32(0); \
3250 TCGv_i32 one = tcg_constant_i32(1); \
3251 tcg_gen_movcond_i32(TCG_COND_EQ, b, b, zero, one, b); \
3252 DIV(t, a, b); \
3253 }
3254
3255 #define DIVS32(NAME, DIV) \
3256 static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) \
3257 { \
3258 TCGv_i32 t0 = tcg_temp_new_i32(); \
3259 TCGv_i32 t1 = tcg_temp_new_i32(); \
3260 tcg_gen_setcondi_i32(TCG_COND_EQ, t0, a, INT32_MIN); \
3261 tcg_gen_setcondi_i32(TCG_COND_EQ, t1, b, -1); \
3262 tcg_gen_and_i32(t0, t0, t1); \
3263 tcg_gen_setcondi_i32(TCG_COND_EQ, t1, b, 0); \
3264 tcg_gen_or_i32(t0, t0, t1); \
3265 tcg_gen_movi_i32(t1, 0); \
3266 tcg_gen_movcond_i32(TCG_COND_NE, b, t0, t1, t0, b); \
3267 DIV(t, a, b); \
3268 }
3269
3270 #define DIVU64(NAME, DIV) \
3271 static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) \
3272 { \
3273 TCGv_i64 zero = tcg_constant_i64(0); \
3274 TCGv_i64 one = tcg_constant_i64(1); \
3275 tcg_gen_movcond_i64(TCG_COND_EQ, b, b, zero, one, b); \
3276 DIV(t, a, b); \
3277 }
3278
3279 #define DIVS64(NAME, DIV) \
3280 static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) \
3281 { \
3282 TCGv_i64 t0 = tcg_temp_new_i64(); \
3283 TCGv_i64 t1 = tcg_temp_new_i64(); \
3284 tcg_gen_setcondi_i64(TCG_COND_EQ, t0, a, INT64_MIN); \
3285 tcg_gen_setcondi_i64(TCG_COND_EQ, t1, b, -1); \
3286 tcg_gen_and_i64(t0, t0, t1); \
3287 tcg_gen_setcondi_i64(TCG_COND_EQ, t1, b, 0); \
3288 tcg_gen_or_i64(t0, t0, t1); \
3289 tcg_gen_movi_i64(t1, 0); \
3290 tcg_gen_movcond_i64(TCG_COND_NE, b, t0, t1, t0, b); \
3291 DIV(t, a, b); \
3292 }
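
/*
 * What the movcond guards amount to, in plain host C for the signed word
 * case (illustrative): divide-by-zero and INT32_MIN / -1 would fault on
 * the host, while the vector quotient is architecturally undefined in
 * these cases, so any substitute divisor is acceptable.
 */
#if 0
#include <stdint.h>

static int32_t vdivsw_lane(int32_t a, int32_t b)
{
    if (b == 0 || (a == INT32_MIN && b == -1)) {
        b = 1;  /* the macros substitute the condition bit itself */
    }
    return a / b;
}
#endif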
3293
3294 DIVS32(do_divsw, tcg_gen_div_i32)
3295 DIVU32(do_divuw, tcg_gen_divu_i32)
3296 DIVS64(do_divsd, tcg_gen_div_i64)
3297 DIVU64(do_divud, tcg_gen_divu_i64)
3298
3299 TRANS_FLAGS2(ISA310, VDIVSW, do_vdiv_vmod, MO_32, do_divsw, NULL)
3300 TRANS_FLAGS2(ISA310, VDIVUW, do_vdiv_vmod, MO_32, do_divuw, NULL)
3301 TRANS_FLAGS2(ISA310, VDIVSD, do_vdiv_vmod, MO_64, NULL, do_divsd)
3302 TRANS_FLAGS2(ISA310, VDIVUD, do_vdiv_vmod, MO_64, NULL, do_divud)
3303 TRANS_FLAGS2(ISA310, VDIVSQ, do_vx_helper, gen_helper_VDIVSQ)
3304 TRANS_FLAGS2(ISA310, VDIVUQ, do_vx_helper, gen_helper_VDIVUQ)
3305
3306 static void do_dives_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
3307 {
3308 TCGv_i64 val1, val2;
3309
3310 val1 = tcg_temp_new_i64();
3311 val2 = tcg_temp_new_i64();
3312
3313 tcg_gen_ext_i32_i64(val1, a);
3314 tcg_gen_ext_i32_i64(val2, b);
3315
3316 /* (a << 32)/b */
3317 tcg_gen_shli_i64(val1, val1, 32);
3318 tcg_gen_div_i64(val1, val1, val2);
3319
3320 /* if quotient doesn't fit in 32 bits the result is undefined */
3321 tcg_gen_extrl_i64_i32(t, val1);
3322 }
3323
3324 static void do_diveu_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
3325 {
3326 TCGv_i64 val1, val2;
3327
3328 val1 = tcg_temp_new_i64();
3329 val2 = tcg_temp_new_i64();
3330
3331 tcg_gen_extu_i32_i64(val1, a);
3332 tcg_gen_extu_i32_i64(val2, b);
3333
3334 /* (a << 32)/b */
3335 tcg_gen_shli_i64(val1, val1, 32);
3336 tcg_gen_divu_i64(val1, val1, val2);
3337
3338 /* if quotient doesn't fit in 32 bits the result is undefined */
3339 tcg_gen_extrl_i64_i32(t, val1);
3340 }
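
/*
 * A worked example of the extended divide (illustrative): the dividend is
 * the 32-bit operand shifted up by 32, so diveu(1, 16) is
 * 0x1'0000'0000 / 16 = 0x1000'0000. b == 0 is diverted beforehand by the
 * DIVU32/DIVS32 wrappers, and quotients that need more than 32 bits
 * (a >= b in the unsigned case) are undefined.
 */
#if 0
#include <stdint.h>

static uint32_t diveu_model(uint32_t a, uint32_t b)
{
    return (uint32_t)(((uint64_t)a << 32) / b);
}
#endif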
3341
3342 DIVS32(do_divesw, do_dives_i32)
3343 DIVU32(do_diveuw, do_diveu_i32)
3344
3345 DIVS32(do_modsw, tcg_gen_rem_i32)
3346 DIVU32(do_moduw, tcg_gen_remu_i32)
3347 DIVS64(do_modsd, tcg_gen_rem_i64)
3348 DIVU64(do_modud, tcg_gen_remu_i64)
3349
3350 TRANS_FLAGS2(ISA310, VDIVESW, do_vdiv_vmod, MO_32, do_divesw, NULL)
3351 TRANS_FLAGS2(ISA310, VDIVEUW, do_vdiv_vmod, MO_32, do_diveuw, NULL)
3352 TRANS_FLAGS2(ISA310, VDIVESD, do_vx_helper, gen_helper_VDIVESD)
3353 TRANS_FLAGS2(ISA310, VDIVEUD, do_vx_helper, gen_helper_VDIVEUD)
3354 TRANS_FLAGS2(ISA310, VDIVESQ, do_vx_helper, gen_helper_VDIVESQ)
3355 TRANS_FLAGS2(ISA310, VDIVEUQ, do_vx_helper, gen_helper_VDIVEUQ)
3356
3357 TRANS_FLAGS2(ISA310, VMODSW, do_vdiv_vmod, MO_32, do_modsw , NULL)
3358 TRANS_FLAGS2(ISA310, VMODUW, do_vdiv_vmod, MO_32, do_moduw, NULL)
3359 TRANS_FLAGS2(ISA310, VMODSD, do_vdiv_vmod, MO_64, NULL, do_modsd)
3360 TRANS_FLAGS2(ISA310, VMODUD, do_vdiv_vmod, MO_64, NULL, do_modud)
3361 TRANS_FLAGS2(ISA310, VMODSQ, do_vx_helper, gen_helper_VMODSQ)
3362 TRANS_FLAGS2(ISA310, VMODUQ, do_vx_helper, gen_helper_VMODUQ)
3363
3364 #undef DIVS32
3365 #undef DIVU32
3366 #undef DIVS64
3367 #undef DIVU64
3368
3369 #undef GEN_VR_LDX
3370 #undef GEN_VR_STX
3371 #undef GEN_VR_LVE
3372 #undef GEN_VR_STVE
3373
3374 #undef GEN_VX_LOGICAL
3375 #undef GEN_VX_LOGICAL_207
3376 #undef GEN_VXFORM
3377 #undef GEN_VXFORM_207
3378 #undef GEN_VXFORM_DUAL
3379 #undef GEN_VXRFORM_DUAL
3380 #undef GEN_VXRFORM1
3381 #undef GEN_VXRFORM
3382 #undef GEN_VXFORM_VSPLTI
3383 #undef GEN_VXFORM_NOA
3384 #undef GEN_VXFORM_UIMM
3385 #undef GEN_VAFORM_PAIRED
3386
3387 #undef GEN_BCD2