/*
 * Power ISA decode for Fixed-Point Facility instructions
 *
 * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Fixed-Point Load/Store Instructions
 */

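/*
 * Common translation for the fixed-point load and store forms: compute
 * EA = (RA|0) + displ, perform a load into or a store from GPR[rt] with
 * the access size/sign given by mop, and, for update forms, write the
 * effective address back to RA.  Update forms with RA=0, or a load with
 * RA=RT, are invalid instruction forms.
 */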
static bool do_ldst(DisasContext *ctx, int rt, int ra, TCGv displ, bool update,
                    bool store, MemOp mop)
{
    TCGv ea;

    if (update && (ra == 0 || (!store && ra == rt))) {
        gen_invalid(ctx);
        return true;
    }
    gen_set_access_type(ctx, ACCESS_INT);

    ea = do_ea_calc(ctx, ra, displ);
    mop ^= ctx->default_tcg_memop_mask;
    if (store) {
        tcg_gen_qemu_st_tl(cpu_gpr[rt], ea, ctx->mem_idx, mop);
    } else {
        tcg_gen_qemu_ld_tl(cpu_gpr[rt], ea, ctx->mem_idx, mop);
    }
    if (update) {
        tcg_gen_mov_tl(cpu_gpr[ra], ea);
    }
    tcg_temp_free(ea);

    return true;
}

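/* D-form: the displacement is the sign-extended 16-bit SI field. */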
static bool do_ldst_D(DisasContext *ctx, arg_D *a, bool update, bool store,
                      MemOp mop)
{
    return do_ldst(ctx, a->rt, a->ra, tcg_constant_tl(a->si), update, store, mop);
}

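/* Prefixed forms: resolve the full displacement (and R bit) via resolve_PLS_D. */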
static bool do_ldst_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool update,
                          bool store, MemOp mop)
{
    arg_D d;
    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }
    return do_ldst_D(ctx, &d, update, store, mop);
}

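/* X-form (indexed): the displacement is taken from GPR[rb]. */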
static bool do_ldst_X(DisasContext *ctx, arg_X *a, bool update,
                      bool store, MemOp mop)
{
    return do_ldst(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, mop);
}

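/*
 * Quadword loads and stores (lq/stq and the prefixed plq/pstq): a 16-byte
 * access using the GPR pair rt/rt+1.  When translating for a parallel
 * context the access is done as a single atomic 128-bit operation if the
 * host supports it, otherwise we exit to the exclusive-lock slow path;
 * in the non-parallel case it is split into two 64-bit accesses.
 */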
static bool do_ldst_quad(DisasContext *ctx, arg_D *a, bool store, bool prefixed)
{
#if defined(TARGET_PPC64)
    TCGv ea;
    TCGv_i64 low_addr_gpr, high_addr_gpr;
    MemOp mop;

    REQUIRE_INSNS_FLAGS(ctx, 64BX);

    if (!prefixed && !(ctx->insns_flags2 & PPC2_LSQ_ISA207)) {
        /* lq and stq were privileged prior to V. 2.07 */
        REQUIRE_SV(ctx);

        if (ctx->le_mode) {
            gen_align_no_le(ctx);
            return true;
        }
    }

    if (!store && unlikely(a->ra == a->rt)) {
        gen_invalid(ctx);
        return true;
    }

    gen_set_access_type(ctx, ACCESS_INT);
    ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->si));

    if (prefixed || !ctx->le_mode) {
        low_addr_gpr = cpu_gpr[a->rt];
        high_addr_gpr = cpu_gpr[a->rt + 1];
    } else {
        low_addr_gpr = cpu_gpr[a->rt + 1];
        high_addr_gpr = cpu_gpr[a->rt];
    }

    if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
        if (HAVE_ATOMIC128) {
            mop = DEF_MEMOP(MO_128);
            TCGv_i32 oi = tcg_constant_i32(make_memop_idx(mop, ctx->mem_idx));
            if (store) {
                if (ctx->le_mode) {
                    gen_helper_stq_le_parallel(cpu_env, ea, low_addr_gpr,
                                               high_addr_gpr, oi);
                } else {
                    gen_helper_stq_be_parallel(cpu_env, ea, high_addr_gpr,
                                               low_addr_gpr, oi);
                }
            } else {
                if (ctx->le_mode) {
                    gen_helper_lq_le_parallel(low_addr_gpr, cpu_env, ea, oi);
                    tcg_gen_ld_i64(high_addr_gpr, cpu_env,
                                   offsetof(CPUPPCState, retxh));
                } else {
                    gen_helper_lq_be_parallel(high_addr_gpr, cpu_env, ea, oi);
                    tcg_gen_ld_i64(low_addr_gpr, cpu_env,
                                   offsetof(CPUPPCState, retxh));
                }
            }
        } else {
            /* Restart with exclusive lock. */
            gen_helper_exit_atomic(cpu_env);
            ctx->base.is_jmp = DISAS_NORETURN;
        }
    } else {
        mop = DEF_MEMOP(MO_UQ);
        if (store) {
            tcg_gen_qemu_st_i64(low_addr_gpr, ea, ctx->mem_idx, mop);
        } else {
            tcg_gen_qemu_ld_i64(low_addr_gpr, ea, ctx->mem_idx, mop);
        }

        gen_addr_add(ctx, ea, ea, 8);

        if (store) {
            tcg_gen_qemu_st_i64(high_addr_gpr, ea, ctx->mem_idx, mop);
        } else {
            tcg_gen_qemu_ld_i64(high_addr_gpr, ea, ctx->mem_idx, mop);
        }
    }
    tcg_temp_free(ea);
#else
    qemu_build_not_reached();
#endif

    return true;
}

static bool do_ldst_quad_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store)
{
    arg_D d;
    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }

    return do_ldst_quad(ctx, &d, store, true);
}

/* Load Byte and Zero */
TRANS(LBZ, do_ldst_D, false, false, MO_UB)
TRANS(LBZX, do_ldst_X, false, false, MO_UB)
TRANS(LBZU, do_ldst_D, true, false, MO_UB)
TRANS(LBZUX, do_ldst_X, true, false, MO_UB)
TRANS(PLBZ, do_ldst_PLS_D, false, false, MO_UB)

/* Load Halfword and Zero */
TRANS(LHZ, do_ldst_D, false, false, MO_UW)
TRANS(LHZX, do_ldst_X, false, false, MO_UW)
TRANS(LHZU, do_ldst_D, true, false, MO_UW)
TRANS(LHZUX, do_ldst_X, true, false, MO_UW)
TRANS(PLHZ, do_ldst_PLS_D, false, false, MO_UW)

/* Load Halfword Algebraic */
TRANS(LHA, do_ldst_D, false, false, MO_SW)
TRANS(LHAX, do_ldst_X, false, false, MO_SW)
TRANS(LHAU, do_ldst_D, true, false, MO_SW)
TRANS(LHAXU, do_ldst_X, true, false, MO_SW)
TRANS(PLHA, do_ldst_PLS_D, false, false, MO_SW)

/* Load Word and Zero */
TRANS(LWZ, do_ldst_D, false, false, MO_UL)
TRANS(LWZX, do_ldst_X, false, false, MO_UL)
TRANS(LWZU, do_ldst_D, true, false, MO_UL)
TRANS(LWZUX, do_ldst_X, true, false, MO_UL)
TRANS(PLWZ, do_ldst_PLS_D, false, false, MO_UL)

/* Load Word Algebraic */
TRANS64(LWA, do_ldst_D, false, false, MO_SL)
TRANS64(LWAX, do_ldst_X, false, false, MO_SL)
TRANS64(LWAUX, do_ldst_X, true, false, MO_SL)
TRANS64(PLWA, do_ldst_PLS_D, false, false, MO_SL)

/* Load Doubleword */
TRANS64(LD, do_ldst_D, false, false, MO_UQ)
TRANS64(LDX, do_ldst_X, false, false, MO_UQ)
TRANS64(LDU, do_ldst_D, true, false, MO_UQ)
TRANS64(LDUX, do_ldst_X, true, false, MO_UQ)
TRANS64(PLD, do_ldst_PLS_D, false, false, MO_UQ)

/* Load Quadword */
TRANS64(LQ, do_ldst_quad, false, false);
TRANS64(PLQ, do_ldst_quad_PLS_D, false);

/* Store Byte */
TRANS(STB, do_ldst_D, false, true, MO_UB)
TRANS(STBX, do_ldst_X, false, true, MO_UB)
TRANS(STBU, do_ldst_D, true, true, MO_UB)
TRANS(STBUX, do_ldst_X, true, true, MO_UB)
TRANS(PSTB, do_ldst_PLS_D, false, true, MO_UB)

/* Store Halfword */
TRANS(STH, do_ldst_D, false, true, MO_UW)
TRANS(STHX, do_ldst_X, false, true, MO_UW)
TRANS(STHU, do_ldst_D, true, true, MO_UW)
TRANS(STHUX, do_ldst_X, true, true, MO_UW)
TRANS(PSTH, do_ldst_PLS_D, false, true, MO_UW)

/* Store Word */
TRANS(STW, do_ldst_D, false, true, MO_UL)
TRANS(STWX, do_ldst_X, false, true, MO_UL)
TRANS(STWU, do_ldst_D, true, true, MO_UL)
TRANS(STWUX, do_ldst_X, true, true, MO_UL)
TRANS(PSTW, do_ldst_PLS_D, false, true, MO_UL)

/* Store Doubleword */
TRANS64(STD, do_ldst_D, false, true, MO_UQ)
TRANS64(STDX, do_ldst_X, false, true, MO_UQ)
TRANS64(STDU, do_ldst_D, true, true, MO_UQ)
TRANS64(STDUX, do_ldst_X, true, true, MO_UQ)
TRANS64(PSTD, do_ldst_PLS_D, false, true, MO_UQ)

/* Store Quadword */
TRANS64(STQ, do_ldst_quad, true, false);
TRANS64(PSTQ, do_ldst_quad_PLS_D, true);

/*
 * Fixed-Point Compare Instructions
 */

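/*
 * CMP/CMPL compare two registers, CMPI/CMPLI compare a register with an
 * immediate; 's' selects the signed variants.  The L field selects a full
 * 64-bit comparison, otherwise only the low 32 bits are compared.
 */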
static bool do_cmp_X(DisasContext *ctx, arg_X_bfl *a, bool s)
{
    if ((ctx->insns_flags & PPC_64B) == 0) {
        /*
         * For 32-bit implementations, The Programming Environments Manual says
         * that "the L field must be cleared, otherwise the instruction form is
         * invalid." It seems, however, that most 32-bit CPUs ignore invalid
         * forms (e.g., section "Instruction Formats" of the 405 and 440
         * manuals, "Integer Compare Instructions" of the 601 manual), with the
         * notable exception of the e500 and e500mc, where L=1 was reported to
         * cause an exception.
         */
        if (a->l) {
            if ((ctx->insns_flags2 & PPC2_BOOKE206)) {
                /*
                 * For 32-bit Book E v2.06 implementations (i.e. e500/e500mc),
                 * generate an illegal instruction exception.
                 */
                return false;
            } else {
                qemu_log_mask(LOG_GUEST_ERROR,
                        "Invalid form of CMP%s at 0x" TARGET_FMT_lx ", L = 1\n",
                        s ? "" : "L", ctx->cia);
            }
        }
        gen_op_cmp32(cpu_gpr[a->ra], cpu_gpr[a->rb], s, a->bf);
        return true;
    }

    /* For 64-bit implementations, deal with bit L accordingly. */
    if (a->l) {
        gen_op_cmp(cpu_gpr[a->ra], cpu_gpr[a->rb], s, a->bf);
    } else {
        gen_op_cmp32(cpu_gpr[a->ra], cpu_gpr[a->rb], s, a->bf);
    }
    return true;
}

static bool do_cmp_D(DisasContext *ctx, arg_D_bf *a, bool s)
{
    if ((ctx->insns_flags & PPC_64B) == 0) {
        /*
         * For 32-bit implementations, The Programming Environments Manual says
         * that "the L field must be cleared, otherwise the instruction form is
         * invalid." It seems, however, that most 32-bit CPUs ignore invalid
         * forms (e.g., section "Instruction Formats" of the 405 and 440
         * manuals, "Integer Compare Instructions" of the 601 manual), with the
         * notable exception of the e500 and e500mc, where L=1 was reported to
         * cause an exception.
         */
        if (a->l) {
            if ((ctx->insns_flags2 & PPC2_BOOKE206)) {
                /*
                 * For 32-bit Book E v2.06 implementations (i.e. e500/e500mc),
                 * generate an illegal instruction exception.
                 */
                return false;
            } else {
                qemu_log_mask(LOG_GUEST_ERROR,
                        "Invalid form of CMP%s at 0x" TARGET_FMT_lx ", L = 1\n",
                        s ? "I" : "LI", ctx->cia);
            }
        }
        gen_op_cmp32(cpu_gpr[a->ra], tcg_constant_tl(a->imm), s, a->bf);
        return true;
    }

    /* For 64-bit implementations, deal with bit L accordingly. */
    if (a->l) {
        gen_op_cmp(cpu_gpr[a->ra], tcg_constant_tl(a->imm), s, a->bf);
    } else {
        gen_op_cmp32(cpu_gpr[a->ra], tcg_constant_tl(a->imm), s, a->bf);
    }
    return true;
}

TRANS(CMP, do_cmp_X, true);
TRANS(CMPL, do_cmp_X, false);
TRANS(CMPI, do_cmp_D, true);
TRANS(CMPLI, do_cmp_D, false);

/*
 * Fixed-Point Arithmetic Instructions
 */

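/* ADDI: RT = (RA|0) + SI; with RA=0 this loads the immediate directly. */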
static bool trans_ADDI(DisasContext *ctx, arg_D *a)
{
    if (a->ra) {
        tcg_gen_addi_tl(cpu_gpr[a->rt], cpu_gpr[a->ra], a->si);
    } else {
        tcg_gen_movi_tl(cpu_gpr[a->rt], a->si);
    }
    return true;
}

static bool trans_PADDI(DisasContext *ctx, arg_PLS_D *a)
{
    arg_D d;
    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }
    return trans_ADDI(ctx, &d);
}

static bool trans_ADDIS(DisasContext *ctx, arg_D *a)
{
    a->si <<= 16;
    return trans_ADDI(ctx, a);
}

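/* ADDPCIS: RT = next instruction address + (D << 16). */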
static bool trans_ADDPCIS(DisasContext *ctx, arg_DX *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    tcg_gen_movi_tl(cpu_gpr[a->rt], ctx->base.pc_next + (a->d << 16));
    return true;
}

static bool trans_INVALID(DisasContext *ctx, arg_INVALID *a)
{
    gen_invalid(ctx);
    return true;
}

static bool trans_PNOP(DisasContext *ctx, arg_PNOP *a)
{
    return true;
}

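/*
 * SETBC family (ISA v3.1): set RT to 1 (or -1 for the 'neg' variants) if
 * CR bit BI is set, 0 otherwise; the 'rev' variants test for the bit being
 * clear instead.
 */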
static bool do_set_bool_cond(DisasContext *ctx, arg_X_bi *a, bool neg, bool rev)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    uint32_t mask = 0x08 >> (a->bi & 0x03);
    TCGCond cond = rev ? TCG_COND_EQ : TCG_COND_NE;
    TCGv temp = tcg_temp_new();

    tcg_gen_extu_i32_tl(temp, cpu_crf[a->bi >> 2]);
    tcg_gen_andi_tl(temp, temp, mask);
    tcg_gen_setcondi_tl(cond, cpu_gpr[a->rt], temp, 0);
    if (neg) {
        tcg_gen_neg_tl(cpu_gpr[a->rt], cpu_gpr[a->rt]);
    }
    tcg_temp_free(temp);

    return true;
}

TRANS(SETBC, do_set_bool_cond, false, false)
TRANS(SETBCR, do_set_bool_cond, false, true)
TRANS(SETNBC, do_set_bool_cond, true, false)
TRANS(SETNBCR, do_set_bool_cond, true, true)

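/*
 * CFUGED: Centrifuge Doubleword.  Bits of RS selected by 1s in the RB mask
 * are gathered into the low-order end of RA, the remaining bits into the
 * high-order end (done in a helper).
 */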
static bool trans_CFUGED(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    gen_helper_CFUGED(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
#else
    qemu_build_not_reached();
#endif
    return true;
}

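/*
 * Common code for CNTLZDM/CNTTZDM: count the 1-bits of the mask that are
 * more significant (leading) or less significant (trailing) than the first
 * set bit of src & mask, i.e. the leading/trailing zero count of the bits
 * of src gathered under the mask.
 */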
static void do_cntzdm(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 mask, int64_t trail)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_and_i64(t0, src, mask);
    if (trail) {
        tcg_gen_ctzi_i64(t0, t0, -1);
    } else {
        tcg_gen_clzi_i64(t0, t0, -1);
    }

    tcg_gen_setcondi_i64(TCG_COND_NE, t1, t0, -1);
    tcg_gen_andi_i64(t0, t0, 63);
    tcg_gen_xori_i64(t0, t0, 63);
    if (trail) {
        tcg_gen_shl_i64(t0, mask, t0);
        tcg_gen_shl_i64(t0, t0, t1);
    } else {
        tcg_gen_shr_i64(t0, mask, t0);
        tcg_gen_shr_i64(t0, t0, t1);
    }

    tcg_gen_ctpop_i64(dst, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

static bool trans_CNTLZDM(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], false);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_CNTTZDM(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], true);
#else
    qemu_build_not_reached();
#endif
    return true;
}

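/*
 * PDEPD: Parallel Bits Deposit Doubleword.  The low-order bits of RS are
 * scattered to the bit positions selected by 1s in the RB mask.
 */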
static bool trans_PDEPD(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    gen_helper_PDEPD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
#else
    qemu_build_not_reached();
#endif
    return true;
}

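/*
 * PEXTD: Parallel Bits Extract Doubleword.  The bits of RS at positions
 * selected by 1s in the RB mask are gathered into the low-order bits of RA.
 */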
static bool trans_PEXTD(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    gen_helper_PEXTD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
#else
    qemu_build_not_reached();
#endif
    return true;
}

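/*
 * ADDG6S: Add and Generate Sixes (BCD assist).  For each 4-bit digit of
 * RA + RB, the corresponding digit of RT is 0 if the addition produced a
 * carry out of that digit and 6 otherwise.
 */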
static bool trans_ADDG6S(DisasContext *ctx, arg_X *a)
{
    const uint64_t carry_bits = 0x1111111111111111ULL;
    TCGv t0, t1, carry, zero = tcg_constant_tl(0);

    REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206);

    t0 = tcg_temp_new();
    t1 = tcg_const_tl(0);
    carry = tcg_const_tl(0);

    for (int i = 0; i < 16; i++) {
        tcg_gen_shri_tl(t0, cpu_gpr[a->ra], i * 4);
        tcg_gen_andi_tl(t0, t0, 0xf);
        tcg_gen_add_tl(t1, t1, t0);

        tcg_gen_shri_tl(t0, cpu_gpr[a->rb], i * 4);
        tcg_gen_andi_tl(t0, t0, 0xf);
        tcg_gen_add_tl(t1, t1, t0);

        tcg_gen_andi_tl(t1, t1, 0x10);
        tcg_gen_setcond_tl(TCG_COND_NE, t1, t1, zero);

        tcg_gen_shli_tl(t0, t1, i * 4);
        tcg_gen_or_tl(carry, carry, t0);
    }

    tcg_gen_xori_tl(carry, carry, (target_long)carry_bits);
    tcg_gen_muli_tl(cpu_gpr[a->rt], carry, 6);

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(carry);

    return true;
}

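/* CDTBCD: Convert Declets To Binary Coded Decimal (BCD assist, helper-based). */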
static bool trans_CDTBCD(DisasContext *ctx, arg_X_sa *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206);
    gen_helper_CDTBCD(cpu_gpr[a->ra], cpu_gpr[a->rs]);
    return true;
}

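/* CBCDTD: Convert Binary Coded Decimal To Declets (the inverse of CDTBCD). */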
static bool trans_CBCDTD(DisasContext *ctx, arg_X_sa *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206);
    gen_helper_CBCDTD(cpu_gpr[a->ra], cpu_gpr[a->rs]);
    return true;
}

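/*
 * HASHST/HASHCHK (and the privileged HASHSTP/HASHCHKP) store and check the
 * ROP-protection hash at EA = (RA) + offset; here the decoded offset is
 * carried in the rt field of the argument.  Before ISA v3.1 these execute
 * as no-ops, and RA=0 is an invalid form.
 */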
static bool do_hash(DisasContext *ctx, arg_X *a, bool priv,
                    void (*helper)(TCGv_ptr, TCGv, TCGv, TCGv))
{
    TCGv ea;

    if (!(ctx->insns_flags2 & PPC2_ISA310)) {
        /* if version is before v3.1, this operation is a nop */
        return true;
    }

    if (priv) {
        /* the privileged variants may only be used in supervisor state */
        REQUIRE_SV(ctx);
    }

    if (unlikely(a->ra == 0)) {
        /* if RA=0, the instruction form is invalid */
        gen_invalid(ctx);
        return true;
    }

    ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->rt));
    helper(cpu_env, ea, cpu_gpr[a->ra], cpu_gpr[a->rb]);

    tcg_temp_free(ea);

    return true;
}

TRANS(HASHST, do_hash, false, gen_helper_HASHST)
TRANS(HASHCHK, do_hash, false, gen_helper_HASHCHK)
TRANS(HASHSTP, do_hash, true, gen_helper_HASHSTP)
TRANS(HASHCHKP, do_hash, true, gen_helper_HASHCHKP)