]> git.proxmox.com Git - mirror_qemu.git/blob - target/ppc/translate.c
target/ppc: Fix gen_tlbsx_booke206
[mirror_qemu.git] / target / ppc / translate.c
1 /*
2 * PowerPC emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 * Copyright (C) 2011 Freescale Semiconductor, Inc.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "internal.h"
24 #include "disas/disas.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "qemu/host-utils.h"
29 #include "qemu/main-loop.h"
30 #include "exec/cpu_ldst.h"
31
32 #include "exec/helper-proto.h"
33 #include "exec/helper-gen.h"
34
35 #include "exec/translator.h"
36 #include "exec/log.h"
37 #include "qemu/atomic128.h"
38 #include "spr_common.h"
39 #include "power8-pmu.h"
40
41 #include "qemu/qemu-print.h"
42 #include "qapi/error.h"
43
44 #define CPU_SINGLE_STEP 0x1
45 #define CPU_BRANCH_STEP 0x2
46
47 /* Include definitions for instructions classes and implementations flags */
48 /* #define PPC_DEBUG_DISAS */
49
50 #ifdef PPC_DEBUG_DISAS
51 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
52 #else
53 # define LOG_DISAS(...) do { } while (0)
54 #endif
55 /*****************************************************************************/
56 /* Code translation helpers */
57
58 /* global register indexes */
59 static char cpu_reg_names[10 * 3 + 22 * 4 /* GPR */
60 + 10 * 4 + 22 * 5 /* SPE GPRh */
61 + 8 * 5 /* CRF */];
62 static TCGv cpu_gpr[32];
63 static TCGv cpu_gprh[32];
64 static TCGv_i32 cpu_crf[8];
65 static TCGv cpu_nip;
66 static TCGv cpu_msr;
67 static TCGv cpu_ctr;
68 static TCGv cpu_lr;
69 #if defined(TARGET_PPC64)
70 static TCGv cpu_cfar;
71 #endif
72 static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
73 static TCGv cpu_reserve;
74 static TCGv cpu_reserve_val;
75 static TCGv cpu_reserve_val2;
76 static TCGv cpu_fpscr;
77 static TCGv_i32 cpu_access_type;
78
79 #include "exec/gen-icount.h"
80
/*
 * One-time TCG global setup: register every PPC architectural register
 * that translation references as a TCG global backed by CPUPPCState.
 * The name strings handed to TCG must outlive translation, so they are
 * carved out of the static cpu_reg_names[] buffer.
 */
void ppc_translate_init(void)
{
    int i;
    char *p;
    size_t cpu_reg_names_size;

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    /* Condition register fields crf0..crf7 */
    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;                     /* "crfN" plus NUL */
        cpu_reg_names_size -= 5;
    }

    /* GPRs and their SPE high halves; "rN"/"rNH" name lengths depend
       on whether N has one or two digits. */
    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;
    }

    cpu_nip = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    /* XER is kept split: the SO/OV/CA (+ISA3.00 OV32/CA32) bits live in
       their own globals; see spr_read_xer/spr_write_xer. */
    cpu_xer = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ca), "CA");
    cpu_ov32 = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, ov32), "OV32");
    cpu_ca32 = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, ca32), "CA32");

    /* lwarx/stwcx. reservation state */
    cpu_reserve = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");
    cpu_reserve_val = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, reserve_val),
                                         "reserve_val");
    cpu_reserve_val2 = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUPPCState, reserve_val2),
                                          "reserve_val2");

    cpu_fpscr = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPUPPCState, access_type),
                                             "access_type");
}
158
159 /* internal defines */
/* Per-translation-block disassembly state. */
struct DisasContext {
    DisasContextBase base;
    target_ulong cia;                    /* current instruction address */
    uint32_t opcode;                     /* raw opcode being translated */
    /* Routine used to access memory */
    bool pr, hv, dr, le_mode;            /* cached MSR-derived state */
    bool lazy_tlb_flush;
    bool need_access_type;               /* track env->access_type updates */
    int mem_idx;                         /* MMU index for memory accesses */
    int access_type;                     /* last access type emitted */
    /* Translation flags */
    MemOp default_tcg_memop_mask;        /* endianness mask for memops */
#if defined(TARGET_PPC64)
    bool sf_mode;                        /* 64-bit computation mode */
    bool has_cfar;
#endif
    bool fpu_enabled;
    bool altivec_enabled;
    bool vsx_enabled;
    bool spe_enabled;
    bool tm_enabled;
    bool gtse;
    bool hr;
    /* PMU-related state, see power8-pmu.h users */
    bool mmcr0_pmcc0;
    bool mmcr0_pmcc1;
    bool mmcr0_pmcjce;
    bool pmc_other;
    bool pmu_insn_cnt;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;              /* CPU_SINGLE_STEP / CPU_BRANCH_STEP */
    uint32_t flags;
    uint64_t insns_flags;
    uint64_t insns_flags2;
};
194
195 #define DISAS_EXIT DISAS_TARGET_0 /* exit to main loop, pc updated */
196 #define DISAS_EXIT_UPDATE DISAS_TARGET_1 /* exit to main loop, pc stale */
197 #define DISAS_CHAIN DISAS_TARGET_2 /* lookup next tb, pc updated */
198 #define DISAS_CHAIN_UPDATE DISAS_TARGET_3 /* lookup next tb, pc stale */
199
200 /* Return true iff byteswap is needed in a scalar memop */
201 static inline bool need_byteswap(const DisasContext *ctx)
202 {
203 #if TARGET_BIG_ENDIAN
204 return ctx->le_mode;
205 #else
206 return !ctx->le_mode;
207 #endif
208 }
209
210 /* True when active word size < size of target_long. */
211 #ifdef TARGET_PPC64
212 # define NARROW_MODE(C) (!(C)->sf_mode)
213 #else
214 # define NARROW_MODE(C) 0
215 #endif
216
/* Per-opcode decode/dispatch entry. */
struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
};
229
230 /* SPR load/store helpers */
/* SPR load/store helpers */

/* Read env->spr[reg] into TCG value t. */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

/* Store TCG value t into env->spr[reg]. */
static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}
240
241 static inline void gen_set_access_type(DisasContext *ctx, int access_type)
242 {
243 if (ctx->need_access_type && ctx->access_type != access_type) {
244 tcg_gen_movi_i32(cpu_access_type, access_type);
245 ctx->access_type = access_type;
246 }
247 }
248
249 static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
250 {
251 if (NARROW_MODE(ctx)) {
252 nip = (uint32_t)nip;
253 }
254 tcg_gen_movi_tl(cpu_nip, nip);
255 }
256
257 static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
258 {
259 TCGv_i32 t0, t1;
260
261 /*
262 * These are all synchronous exceptions, we set the PC back to the
263 * faulting instruction
264 */
265 gen_update_nip(ctx, ctx->cia);
266 t0 = tcg_const_i32(excp);
267 t1 = tcg_const_i32(error);
268 gen_helper_raise_exception_err(cpu_env, t0, t1);
269 ctx->base.is_jmp = DISAS_NORETURN;
270 }
271
272 static void gen_exception(DisasContext *ctx, uint32_t excp)
273 {
274 TCGv_i32 t0;
275
276 /*
277 * These are all synchronous exceptions, we set the PC back to the
278 * faulting instruction
279 */
280 gen_update_nip(ctx, ctx->cia);
281 t0 = tcg_const_i32(excp);
282 gen_helper_raise_exception(cpu_env, t0);
283 ctx->base.is_jmp = DISAS_NORETURN;
284 }
285
286 static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
287 target_ulong nip)
288 {
289 TCGv_i32 t0;
290
291 gen_update_nip(ctx, nip);
292 t0 = tcg_const_i32(excp);
293 gen_helper_raise_exception(cpu_env, t0);
294 ctx->base.is_jmp = DISAS_NORETURN;
295 }
296
/* Prepare for an I/O-sensitive operation when icount is in use. */
static void gen_icount_io_start(DisasContext *ctx)
{
    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
        /*
         * An I/O instruction must be last in the TB.
         * Chain to the next TB, and let the code from gen_tb_start
         * decide if we need to return to the main loop.
         * Doing this first also allows this value to be overridden.
         */
        ctx->base.is_jmp = DISAS_TOO_MANY;
    }
}
310
#if !defined(CONFIG_USER_ONLY)
/* Let the helper re-evaluate pending interrupts (system emulation only). */
static void gen_ppc_maybe_interrupt(DisasContext *ctx)
{
    gen_icount_io_start(ctx);
    gen_helper_ppc_maybe_interrupt(cpu_env);
}
#endif
318
319 /*
320 * Tells the caller what is the appropriate exception to generate and prepares
321 * SPR registers for this exception.
322 *
323 * The exception can be either POWERPC_EXCP_TRACE (on most PowerPCs) or
324 * POWERPC_EXCP_DEBUG (on BookE).
325 */
326 static uint32_t gen_prep_dbgex(DisasContext *ctx)
327 {
328 if (ctx->flags & POWERPC_FLAG_DE) {
329 target_ulong dbsr = 0;
330 if (ctx->singlestep_enabled & CPU_SINGLE_STEP) {
331 dbsr = DBCR0_ICMP;
332 } else {
333 /* Must have been branch */
334 dbsr = DBCR0_BRT;
335 }
336 TCGv t0 = tcg_temp_new();
337 gen_load_spr(t0, SPR_BOOKE_DBSR);
338 tcg_gen_ori_tl(t0, t0, dbsr);
339 gen_store_spr(SPR_BOOKE_DBSR, t0);
340 return POWERPC_EXCP_DEBUG;
341 } else {
342 return POWERPC_EXCP_TRACE;
343 }
344 }
345
/* Raise the debug/trace exception chosen by gen_prep_dbgex.  Ends the TB. */
static void gen_debug_exception(DisasContext *ctx)
{
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
    ctx->base.is_jmp = DISAS_NORETURN;
}
351
/* Illegal-instruction style exception. */
static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error);
}

/* Privilege violation, raised directly as a program check. */
static inline void gen_priv_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error);
}

/* Hypervisor-privilege violation. */
static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error);
}
368
369 /*****************************************************************************/
370 /* SPR READ/WRITE CALLBACKS */
371
/* Callback for SPRs with no access at the current privilege level:
   intentionally a no-op (debug printf kept disabled). */
void spr_noaccess(DisasContext *ctx, int gprn, int sprn)
{
#if 0
    sprn = ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
    printf("ERROR: try to access SPR %d !\n", sprn);
#endif
}
379
380 /* #define PPC_DUMP_SPR_ACCESSES */
381
382 /*
383 * Generic callbacks:
384 * do nothing but store/retrieve spr value
385 */
/* Optionally trace an SPR read (compiled out unless PPC_DUMP_SPR_ACCESSES). */
static void spr_load_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
    TCGv_i32 t0 = tcg_const_i32(sprn);
    gen_helper_load_dump_spr(cpu_env, t0);
#endif
}

/* Generic mfspr: copy env->spr[sprn] into the destination GPR. */
void spr_read_generic(DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], sprn);
    spr_load_dump_spr(sprn);
}

/* Optionally trace an SPR write (compiled out unless PPC_DUMP_SPR_ACCESSES). */
static void spr_store_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
    TCGv_i32 t0 = tcg_const_i32(sprn);
    gen_helper_store_dump_spr(cpu_env, t0);
#endif
}

/* Generic mtspr: copy the source GPR into env->spr[sprn]. */
void spr_write_generic(DisasContext *ctx, int sprn, int gprn)
{
    gen_store_spr(sprn, cpu_gpr[gprn]);
    spr_store_dump_spr(sprn);
}
413
/* mtspr CTRL: generic store, then end the TB. */
void spr_write_CTRL(DisasContext *ctx, int sprn, int gprn)
{
    spr_write_generic(ctx, sprn, gprn);

    /*
     * SPR_CTRL writes must force a new translation block,
     * allowing the PMU to calculate the run latch events with
     * more accuracy.
     */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}
425
#if !defined(CONFIG_USER_ONLY)
/* mtspr for 32-bit SPRs: on 64-bit targets only the low 32 bits of the
   source GPR are stored; elsewhere it degenerates to the generic store. */
void spr_write_generic32(DisasContext *ctx, int sprn, int gprn)
{
#ifdef TARGET_PPC64
    TCGv t0 = tcg_temp_new();
    tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]);
    gen_store_spr(sprn, t0);
    spr_store_dump_spr(sprn);
#else
    spr_write_generic(ctx, sprn, gprn);
#endif
}

/* "Write to clear" style mtspr: combines the old SPR value with the
 * negation of the written GPR.
 * NOTE(review): tcg_gen_neg_tl is two's complement (~x + 1), not ~x;
 * classic write-one-to-clear semantics would use not/andc — confirm
 * this is the intended behavior for the SPRs wired to this callback. */
void spr_write_clear(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    gen_load_spr(t0, sprn);
    tcg_gen_neg_tl(t1, cpu_gpr[gprn]);
    tcg_gen_and_tl(t0, t0, t1);
    gen_store_spr(sprn, t0);
}

/* SPR access that is architecturally a no-op. */
void spr_access_nop(DisasContext *ctx, int sprn, int gprn)
{
}

#endif
454
455 /* SPR common to all PowerPC */
456 /* XER */
/* mfspr XER: reassemble the architected XER from the split-out
   SO/OV/CA globals (plus OV32/CA32 on ISA 3.00 CPUs). */
void spr_read_xer(DisasContext *ctx, int gprn, int sprn)
{
    TCGv dst = cpu_gpr[gprn];
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    tcg_gen_mov_tl(dst, cpu_xer);
    tcg_gen_shli_tl(t0, cpu_so, XER_SO);
    tcg_gen_shli_tl(t1, cpu_ov, XER_OV);
    tcg_gen_shli_tl(t2, cpu_ca, XER_CA);
    tcg_gen_or_tl(t0, t0, t1);
    tcg_gen_or_tl(dst, dst, t2);
    tcg_gen_or_tl(dst, dst, t0);
    if (is_isa300(ctx)) {
        tcg_gen_shli_tl(t0, cpu_ov32, XER_OV32);
        tcg_gen_or_tl(dst, dst, t0);
        tcg_gen_shli_tl(t0, cpu_ca32, XER_CA32);
        tcg_gen_or_tl(dst, dst, t0);
    }
}
477
/* mtspr XER: scatter the flag bits into their dedicated globals and keep
   the remaining bits in cpu_xer. */
void spr_write_xer(DisasContext *ctx, int sprn, int gprn)
{
    TCGv src = cpu_gpr[gprn];
    /* Write all flags, while reading back check for isa300 */
    tcg_gen_andi_tl(cpu_xer, src,
                    ~((1u << XER_SO) |
                      (1u << XER_OV) | (1u << XER_OV32) |
                      (1u << XER_CA) | (1u << XER_CA32)));
    tcg_gen_extract_tl(cpu_ov32, src, XER_OV32, 1);
    tcg_gen_extract_tl(cpu_ca32, src, XER_CA32, 1);
    tcg_gen_extract_tl(cpu_so, src, XER_SO, 1);
    tcg_gen_extract_tl(cpu_ov, src, XER_OV, 1);
    tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1);
}
492
493 /* LR */
/* LR: link register, kept in its own TCG global. */
void spr_read_lr(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_lr);
}

void spr_write_lr(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_lr, cpu_gpr[gprn]);
}

/* CFAR: come-from address register (64-bit, system emulation only). */
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
void spr_read_cfar(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_cfar);
}

void spr_write_cfar(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_cfar, cpu_gpr[gprn]);
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */

/* CTR: count register, kept in its own TCG global. */
void spr_read_ctr(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_ctr);
}

void spr_write_ctr(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_ctr, cpu_gpr[gprn]);
}
527
528 /* User read access to SPR */
529 /* USPRx */
530 /* UMMCRx */
531 /* UPMCx */
532 /* USIA */
533 /* UDECR */
/* User-mode alias SPRs (USPRx/UMMCRx/UPMCx/USIA/UDECR): the user SPR
   number is the privileged one minus 0x10, so redirect by +0x10. */
void spr_read_ureg(DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], sprn + 0x10);
}

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* Write through a user-mode alias to the underlying privileged SPR. */
void spr_write_ureg(DisasContext *ctx, int sprn, int gprn)
{
    gen_store_spr(sprn + 0x10, cpu_gpr[gprn]);
}
#endif
545
546 /* SPR common to all non-embedded PowerPC */
547 /* DECR */
#if !defined(CONFIG_USER_ONLY)
/* DECR: decrementer; timer-backed, so icount I/O handling is required. */
void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
{
    gen_icount_io_start(ctx);
    gen_helper_load_decr(cpu_gpr[gprn], cpu_env);
}

void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_decr(cpu_env, cpu_gpr[gprn]);
}
#endif
561
562 /* SPR common to all non-embedded PowerPC, except 601 */
563 /* Time base */
/* Time base reads; TB is clock-backed so icount I/O handling applies. */
void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
{
    gen_icount_io_start(ctx);
    gen_helper_load_tbl(cpu_gpr[gprn], cpu_env);
}

void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
{
    gen_icount_io_start(ctx);
    gen_helper_load_tbu(cpu_gpr[gprn], cpu_env);
}

/* Alternate time base (no icount handling in these helpers). */
void spr_read_atbl(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_atbl(cpu_gpr[gprn], cpu_env);
}

void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_atbu(cpu_gpr[gprn], cpu_env);
}
585
#if !defined(CONFIG_USER_ONLY)
/* Time base writes (privileged; clock-backed, icount-sensitive). */
void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]);
}

void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]);
}

/* Alternate time base writes. */
void spr_write_atbl(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]);
}

void spr_write_atbu(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]);
}

#if defined(TARGET_PPC64)
/* PURR: processor utilization resource register. */
void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
{
    gen_icount_io_start(ctx);
    gen_helper_load_purr(cpu_gpr[gprn], cpu_env);
}

void spr_write_purr(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_purr(cpu_env, cpu_gpr[gprn]);
}

/* HDECR */
void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
{
    gen_icount_io_start(ctx);
    gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env);
}

void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]);
}

/* VTB: virtual time base. */
void spr_read_vtb(DisasContext *ctx, int gprn, int sprn)
{
    gen_icount_io_start(ctx);
    gen_helper_load_vtb(cpu_gpr[gprn], cpu_env);
}

void spr_write_vtb(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_vtb(cpu_env, cpu_gpr[gprn]);
}

/* TBU40: write the upper 40 bits of the time base. */
void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_tbu40(cpu_env, cpu_gpr[gprn]);
}

#endif
#endif
655
656 #if !defined(CONFIG_USER_ONLY)
657 /* IBAT0U...IBAT0U */
658 /* IBAT0L...IBAT7L */
659 void spr_read_ibat(DisasContext *ctx, int gprn, int sprn)
660 {
661 tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
662 offsetof(CPUPPCState,
663 IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
664 }
665
666 void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn)
667 {
668 tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
669 offsetof(CPUPPCState,
670 IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
671 }
672
673 void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
674 {
675 TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2);
676 gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
677 }
678
679 void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn)
680 {
681 TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4U) / 2) + 4);
682 gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
683 }
684
685 void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn)
686 {
687 TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0L) / 2);
688 gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
689 }
690
691 void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
692 {
693 TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4L) / 2) + 4);
694 gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
695 }
696
697 /* DBAT0U...DBAT7U */
698 /* DBAT0L...DBAT7L */
699 void spr_read_dbat(DisasContext *ctx, int gprn, int sprn)
700 {
701 tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
702 offsetof(CPUPPCState,
703 DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
704 }
705
706 void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn)
707 {
708 tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
709 offsetof(CPUPPCState,
710 DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
711 }
712
713 void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
714 {
715 TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0U) / 2);
716 gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
717 }
718
719 void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn)
720 {
721 TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4U) / 2) + 4);
722 gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
723 }
724
725 void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn)
726 {
727 TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0L) / 2);
728 gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
729 }
730
731 void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn)
732 {
733 TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4L) / 2) + 4);
734 gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
735 }
736
737 /* SDR1 */
/* SDR1: hashed page table base; helper keeps MMU state coherent. */
void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_sdr1(cpu_env, cpu_gpr[gprn]);
}
742
#if defined(TARGET_PPC64)
/* 64 bits PowerPC specific SPRs */
/* PIDR: process ID; helper-backed so translation state stays coherent. */
void spr_write_pidr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_pidr(cpu_env, cpu_gpr[gprn]);
}

/* LPIDR: logical partition ID. */
void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_lpidr(cpu_env, cpu_gpr[gprn]);
}

/* HIOR: exception prefix, stored in env->excp_prefix. */
void spr_read_hior(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, excp_prefix));
}

void spr_write_hior(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    /* Only bits 0x3FFFFF00000 of the written value are retained. */
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
}

/* PTCR: partition table control register. */
void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_ptcr(cpu_env, cpu_gpr[gprn]);
}

/* PCR: processor compatibility register. */
void spr_write_pcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_pcr(cpu_env, cpu_gpr[gprn]);
}

/* DPDES */
void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_dpdes(cpu_gpr[gprn], cpu_env);
}

void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_dpdes(cpu_env, cpu_gpr[gprn]);
}
#endif
788 #endif
789
790 /* PowerPC 40x specific registers */
791 #if !defined(CONFIG_USER_ONLY)
/* 40x PIT: programmable interval timer (icount-sensitive). */
void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn)
{
    gen_icount_io_start(ctx);
    gen_helper_load_40x_pit(cpu_gpr[gprn], cpu_env);
}

void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_40x_pit(cpu_env, cpu_gpr[gprn]);
}

/* 40x DBCR0: debug control; the helper may reset the machine. */
void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_store_spr(sprn, cpu_gpr[gprn]);
    gen_helper_store_40x_dbcr0(cpu_env, cpu_gpr[gprn]);
    /* We must stop translation as we may have rebooted */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}

/* 40x SLER: storage little-endian register. */
void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_40x_sler(cpu_env, cpu_gpr[gprn]);
}

/* 40x TCR/TSR: timer control/status (icount-sensitive). */
void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_40x_tcr(cpu_env, cpu_gpr[gprn]);
}

void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_40x_tsr(cpu_env, cpu_gpr[gprn]);
}

/* 40x PID: only the low 8 bits are architecturally writable here. */
void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xFF);
    gen_helper_store_40x_pid(cpu_env, t0);
}

/* BookE TCR/TSR: timer control/status (icount-sensitive). */
void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_booke_tcr(cpu_env, cpu_gpr[gprn]);
}

void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_booke_tsr(cpu_env, cpu_gpr[gprn]);
}
849 #endif
850
851 /* PIR */
852 #if !defined(CONFIG_USER_ONLY)
/* PIR: processor ID; only the low 4 bits are writable. */
void spr_write_pir(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xF);
    gen_store_spr(SPR_PIR, t0);
}
859 #endif
860
861 /* SPE specific registers */
/* SPEFSCR: stored as a 32-bit field (env->spe_fscr), so widen/narrow
   between the 32-bit state and the target-long GPR. */
void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
    tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0);
}

void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]);
    tcg_gen_st_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
}
875
876 #if !defined(CONFIG_USER_ONLY)
877 /* Callback used to write the exception vector base */
/* Callback used to write the exception vector base: the written value is
   masked by env->ivpr_mask and mirrored into excp_prefix and the SPR. */
void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivpr_mask));
    tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
    gen_store_spr(sprn, t0);
}

/* Write one of the BookE IVOR exception vectors.  The three IVOR SPR
   ranges are discontiguous, so map sprn onto a flat vector index. */
void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn)
{
    int sprn_offs;

    if (sprn >= SPR_BOOKE_IVOR0 && sprn <= SPR_BOOKE_IVOR15) {
        sprn_offs = sprn - SPR_BOOKE_IVOR0;
    } else if (sprn >= SPR_BOOKE_IVOR32 && sprn <= SPR_BOOKE_IVOR37) {
        sprn_offs = sprn - SPR_BOOKE_IVOR32 + 32;
    } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) {
        sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38;
    } else {
        qemu_log_mask(LOG_GUEST_ERROR, "Trying to write an unknown exception"
                      " vector 0x%03x\n", sprn);
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    TCGv t0 = tcg_temp_new();
    tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivor_mask));
    tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_vectors[sprn_offs]));
    gen_store_spr(sprn, t0);
}
910 #endif
911
912 #ifdef TARGET_PPC64
913 #ifndef CONFIG_USER_ONLY
/*
 * AMR write: bits may only be changed where the relevant authority-mask
 * override register (UAMOR in problem state, AMOR otherwise) permits.
 */
void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 PR=0 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    if (ctx->pr) {
        gen_load_spr(t1, SPR_UAMOR);
    } else {
        gen_load_spr(t1, SPR_AMOR);
    }

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_AMR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_AMR, t0);
    spr_store_dump_spr(SPR_AMR);
}

/* UAMOR write: only bits permitted by AMOR may be changed. */
void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    gen_load_spr(t1, SPR_AMOR);

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_UAMOR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_UAMOR, t0);
    spr_store_dump_spr(SPR_UAMOR);
}

/* IAMR write: only bits permitted by AMOR may be changed. */
void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    gen_load_spr(t1, SPR_AMOR);

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_IAMR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_IAMR, t0);
    spr_store_dump_spr(SPR_IAMR);
}
998 #endif
999 #endif
1000
1001 #ifndef CONFIG_USER_ONLY
/* THRMx read: let the helper fix up the thermal state first, then do a
   generic SPR read. */
void spr_read_thrm(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_fixup_thrm(cpu_env);
    gen_load_spr(cpu_gpr[gprn], sprn);
    spr_load_dump_spr(sprn);
}
1008 #endif /* !CONFIG_USER_ONLY */
1009
1010 #if !defined(CONFIG_USER_ONLY)
1011 void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn)
1012 {
1013 TCGv t0 = tcg_temp_new();
1014
1015 tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE);
1016 gen_store_spr(sprn, t0);
1017 }
1018
1019 void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn)
1020 {
1021 TCGv t0 = tcg_temp_new();
1022
1023 tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE);
1024 gen_store_spr(sprn, t0);
1025 }
1026
1027 void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn)
1028 {
1029 TCGv t0 = tcg_temp_new();
1030
1031 tcg_gen_andi_tl(t0, cpu_gpr[gprn],
1032 ~(E500_L2CSR0_L2FI | E500_L2CSR0_L2FL | E500_L2CSR0_L2LFC));
1033 gen_store_spr(sprn, t0);
1034 }
1035
1036 void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn)
1037 {
1038 gen_helper_booke206_tlbflush(cpu_env, cpu_gpr[gprn]);
1039 }
1040
1041 void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn)
1042 {
1043 TCGv_i32 t0 = tcg_const_i32(sprn);
1044 gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]);
1045 }
1046 void spr_write_eplc(DisasContext *ctx, int sprn, int gprn)
1047 {
1048 gen_helper_booke_set_eplc(cpu_env, cpu_gpr[gprn]);
1049 }
1050 void spr_write_epsc(DisasContext *ctx, int sprn, int gprn)
1051 {
1052 gen_helper_booke_set_epsc(cpu_env, cpu_gpr[gprn]);
1053 }
1054
1055 #endif
1056
1057 #if !defined(CONFIG_USER_ONLY)
/* MAS7_3: 64-bit alias of the MAS7:MAS3 pair — split the written value
   into the two 32-bit SPRs. */
void spr_write_mas73(DisasContext *ctx, int sprn, int gprn)
{
    TCGv val = tcg_temp_new();
    tcg_gen_ext32u_tl(val, cpu_gpr[gprn]);
    gen_store_spr(SPR_BOOKE_MAS3, val);
    tcg_gen_shri_tl(val, cpu_gpr[gprn], 32);
    gen_store_spr(SPR_BOOKE_MAS7, val);
}

/* MAS7_3 read: recombine MAS7 (high) and MAS3 (low). */
void spr_read_mas73(DisasContext *ctx, int gprn, int sprn)
{
    TCGv mas7 = tcg_temp_new();
    TCGv mas3 = tcg_temp_new();
    gen_load_spr(mas7, SPR_BOOKE_MAS7);
    tcg_gen_shli_tl(mas7, mas7, 32);
    gen_load_spr(mas3, SPR_BOOKE_MAS3);
    tcg_gen_or_tl(cpu_gpr[gprn], mas3, mas7);
}
1076
1077 #endif
1078
1079 #ifdef TARGET_PPC64
1080 static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn,
1081 int bit, int sprn, int cause)
1082 {
1083 TCGv_i32 t1 = tcg_const_i32(bit);
1084 TCGv_i32 t2 = tcg_const_i32(sprn);
1085 TCGv_i32 t3 = tcg_const_i32(cause);
1086
1087 gen_helper_fscr_facility_check(cpu_env, t1, t2, t3);
1088 }
1089
1090 static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn,
1091 int bit, int sprn, int cause)
1092 {
1093 TCGv_i32 t1 = tcg_const_i32(bit);
1094 TCGv_i32 t2 = tcg_const_i32(sprn);
1095 TCGv_i32 t3 = tcg_const_i32(cause);
1096
1097 gen_helper_msr_facility_check(cpu_env, t1, t2, t3);
1098 }
1099
/* Read the upper 32 bits of the SPR numbered sprn-1 (used for the
   "upper half" aliases of 64-bit SPR pairs). */
void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn)
{
    TCGv spr_up = tcg_temp_new();
    TCGv spr = tcg_temp_new();

    gen_load_spr(spr, sprn - 1);
    tcg_gen_shri_tl(spr_up, spr, 32);
    tcg_gen_ext32u_tl(cpu_gpr[gprn], spr_up);
}

/* Deposit the GPR's low 32 bits into the upper half of SPR sprn-1. */
void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn)
{
    TCGv spr = tcg_temp_new();

    gen_load_spr(spr, sprn - 1);
    tcg_gen_deposit_tl(spr, spr, cpu_gpr[gprn], 32, 32);
    gen_store_spr(sprn - 1, spr);
}
1118
1119 #if !defined(CONFIG_USER_ONLY)
/* Write HMER: bits can only be cleared (new = old AND written value). */
void spr_write_hmer(DisasContext *ctx, int sprn, int gprn)
{
    TCGv hmer = tcg_temp_new();

    gen_load_spr(hmer, sprn);
    tcg_gen_and_tl(hmer, cpu_gpr[gprn], hmer);
    gen_store_spr(sprn, hmer);
    spr_store_dump_spr(sprn);
}
1129
/* Write LPCR via helper, which filters bits per CPU model. */
void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]);
}
1134 #endif /* !defined(CONFIG_USER_ONLY) */
1135
/* Read TAR, first checking FSCR[TAR] facility availability. */
void spr_read_tar(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
    spr_read_generic(ctx, gprn, sprn);
}
1141
/* Write TAR, first checking FSCR[TAR] facility availability. */
void spr_write_tar(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
    spr_write_generic(ctx, sprn, gprn);
}
1147
/* Read a TM SPR, first checking MSR[TM] availability. */
void spr_read_tm(DisasContext *ctx, int gprn, int sprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_read_generic(ctx, gprn, sprn);
}
1153
/* Write a TM SPR, first checking MSR[TM] availability. */
void spr_write_tm(DisasContext *ctx, int sprn, int gprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_write_generic(ctx, sprn, gprn);
}
1159
/* Read upper 32 bits of a TM SPR, first checking MSR[TM] availability. */
void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_read_prev_upper32(ctx, gprn, sprn);
}
1165
/* Write upper 32 bits of a TM SPR, first checking MSR[TM] availability. */
void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_write_prev_upper32(ctx, sprn, gprn);
}
1171
/* Read an EBB SPR, first checking FSCR[EBB] facility availability. */
void spr_read_ebb(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_read_generic(ctx, gprn, sprn);
}
1177
/* Write an EBB SPR, first checking FSCR[EBB] facility availability. */
void spr_write_ebb(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_write_generic(ctx, sprn, gprn);
}
1183
/* Read upper 32 bits of an EBB SPR, first checking FSCR[EBB]. */
void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_read_prev_upper32(ctx, gprn, sprn);
}
1189
/* Write upper 32 bits of an EBB SPR, first checking FSCR[EBB]. */
void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_write_prev_upper32(ctx, sprn, gprn);
}
1195
/* Problem-state read of (H)DEXCR: returns only bits 32:63 of the
   privileged register, which lives 16 SPR numbers above this alias. */
void spr_read_dexcr_ureg(DisasContext *ctx, int gprn, int sprn)
{
    TCGv t0 = tcg_temp_new();

    /*
     * Access to the (H)DEXCR in problem state is done using separated
     * SPR indexes which are 16 below the SPR indexes which have full
     * access to the (H)DEXCR in privileged state. Problem state can
     * only read bits 32:63, bits 0:31 return 0.
     *
     * See section 9.3.1-9.3.2 of PowerISA v3.1B
     */

    gen_load_spr(t0, sprn + 16);
    tcg_gen_ext32u_tl(cpu_gpr[gprn], t0);
}
1212 #endif
1213
/*
 * Convenience wrappers around GEN_OPCODE*: the _E forms carry an extra
 * type2 flag word, the 2 forms a distinct opcode-table name, and the
 * _2 forms a fourth opcode field.
 */
#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2) \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type) \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2) \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2) \
GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2)

#define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \
GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2)
1231
/* One entry of the opcode dispatch table: up to four opcode fields,
   the handler descriptor, and the mnemonic used in diagnostics. */
typedef struct opcode_t {
    unsigned char opc1, opc2, opc3, opc4;
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[4];
#endif
    opc_handler_t handler;
    const char *oname;
} opcode_t;
1240
/* Raise a privileged-opcode program interrupt. */
static void gen_priv_opc(DisasContext *ctx)
{
    gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
}
1245
/* Helpers for priv. check */
#define GEN_PRIV(CTX) \
    do { \
        gen_priv_opc(CTX); return; \
    } while (0)

#if defined(CONFIG_USER_ONLY)
/* In user-mode emulation every privileged access faults. */
#define CHK_HV(CTX) GEN_PRIV(CTX)
#define CHK_SV(CTX) GEN_PRIV(CTX)
#define CHK_HVRM(CTX) GEN_PRIV(CTX)
#else
/* Require hypervisor state (not problem state and HV set). */
#define CHK_HV(CTX)                         \
    do {                                    \
        if (unlikely(ctx->pr || !ctx->hv)) {\
            GEN_PRIV(CTX);                  \
        }                                   \
    } while (0)
/* Require supervisor state (not problem state). */
#define CHK_SV(CTX)              \
    do {                         \
        if (unlikely(ctx->pr)) { \
            GEN_PRIV(CTX);       \
        }                        \
    } while (0)
/* Require hypervisor real mode (HV set, data relocation off). */
#define CHK_HVRM(CTX)                                   \
    do {                                                \
        if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \
            GEN_PRIV(CTX);                              \
        }                                               \
    } while (0)
#endif

/* No privilege check. */
#define CHK_NONE(CTX)
1278
1279 /*****************************************************************************/
1280 /* PowerPC instructions table */
1281
/*
 * Opcode-table entry initializers. All expand to an opcode_t:
 *  GEN_OPCODE      - 3 opcode fields, name derived from the handler
 *  GEN_OPCODE_DUAL - like GEN_OPCODE with two invalid-bit masks
 *  GEN_OPCODE2     - like GEN_OPCODE with an explicit name string
 *  GEN_OPCODE3/4   - 4 opcode fields, derived / explicit name
 * opc4 = 0xff marks entries that have no fourth opcode field.
 */
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2)              \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2)        \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
1353
/* Invalid instruction: raise an illegal-instruction program interrupt. */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
1359
/* Catch-all table entry used for unassigned opcodes. */
static opc_handler_t invalid_handler = {
    .inval1  = 0xFFFFFFFF,
    .inval2  = 0xFFFFFFFF,
    .type = PPC_NONE,
    .type2 = PPC_NONE,
    .handler = gen_invalid,
};
1367
1368 /*** Integer comparison ***/
1369
/* Compare arg0 with arg1 (signed if s) and set CR field crf to
   LT/GT/EQ with SO ORed in, via a chain of movconds. */
static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv_i32 t = tcg_temp_new_i32();

    /* Start from EQ, then override with LT and GT in turn. */
    tcg_gen_movi_tl(t0, CRF_EQ);
    tcg_gen_movi_tl(t1, CRF_LT);
    tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU),
                       t0, arg0, arg1, t1, t0);
    tcg_gen_movi_tl(t1, CRF_GT);
    tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU),
                       t0, arg0, arg1, t1, t0);

    tcg_gen_trunc_tl_i32(t, t0);
    /* CR field = condition bits | current summary-overflow bit. */
    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t);
}
1388
1389 static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
1390 {
1391 TCGv t0 = tcg_const_tl(arg1);
1392 gen_op_cmp(arg0, t0, s, crf);
1393 }
1394
/* 32-bit compare: sign- or zero-extend both operands, then compare. */
static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    if (s) {
        tcg_gen_ext32s_tl(t0, arg0);
        tcg_gen_ext32s_tl(t1, arg1);
    } else {
        tcg_gen_ext32u_tl(t0, arg0);
        tcg_gen_ext32u_tl(t1, arg1);
    }
    gen_op_cmp(t0, t1, s, crf);
}
1409
1410 static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
1411 {
1412 TCGv t0 = tcg_const_tl(arg1);
1413 gen_op_cmp32(arg0, t0, s, crf);
1414 }
1415
/* Set CR0 from a signed compare of reg against zero (Rc=1 forms). */
static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
{
    if (NARROW_MODE(ctx)) {
        /* 32-bit mode: compare only the low word. */
        gen_op_cmpi32(reg, 0, 1, 0);
    } else {
        gen_op_cmpi(reg, 0, 1, 0);
    }
}
1424
/* cmprb - range comparison: isupper, isalpha, islower */
static void gen_cmprb(DisasContext *ctx)
{
    TCGv_i32 src1 = tcg_temp_new_i32();
    TCGv_i32 src2 = tcg_temp_new_i32();
    TCGv_i32 src2lo = tcg_temp_new_i32();
    TCGv_i32 src2hi = tcg_temp_new_i32();
    TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(src1, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(src2, cpu_gpr[rB(ctx->opcode)]);

    /* Byte under test and the first lo/hi range bounds from rB. */
    tcg_gen_andi_i32(src1, src1, 0xFF);
    tcg_gen_ext8u_i32(src2lo, src2);
    tcg_gen_shri_i32(src2, src2, 8);
    tcg_gen_ext8u_i32(src2hi, src2);

    /* In range: lo <= src1 && src1 <= hi. */
    tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
    tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
    tcg_gen_and_i32(crf, src2lo, src2hi);

    if (ctx->opcode & 0x00200000) {
        /* L=1: also test the second range in rB's upper bytes. */
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2lo, src2);
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2hi, src2);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
        tcg_gen_and_i32(src2lo, src2lo, src2hi);
        tcg_gen_or_i32(crf, crf, src2lo);
    }
    /* Result is reported in the GT bit of the CR field. */
    tcg_gen_shli_i32(crf, crf, CRF_GT_BIT);
}
1458
1459 #if defined(TARGET_PPC64)
/* cmpeqb - test whether any byte of rB equals the low byte of rA */
static void gen_cmpeqb(DisasContext *ctx)
{
    gen_helper_cmpeqb(cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
}
1466 #endif
1467
1468 /* isel (PowerPC 2.03 specification) */
1469 static void gen_isel(DisasContext *ctx)
1470 {
1471 uint32_t bi = rC(ctx->opcode);
1472 uint32_t mask = 0x08 >> (bi & 0x03);
1473 TCGv t0 = tcg_temp_new();
1474 TCGv zr;
1475
1476 tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
1477 tcg_gen_andi_tl(t0, t0, mask);
1478
1479 zr = tcg_const_tl(0);
1480 tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
1481 rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
1482 cpu_gpr[rB(ctx->opcode)]);
1483 }
1484
/* cmpb: PowerPC 2.05 specification - per-byte equality mask of rS vs rB */
static void gen_cmpb(DisasContext *ctx)
{
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}
1491
1492 /*** Integer arithmetic ***/
1493
/* Compute OV/OV32 (and accumulate SO) for arg0 = arg1 +/- arg2.
   Overflow occurred iff the result sign differs from both operand
   signs (add) / from the minuend after sign adjustment (sub). */
static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_xor_tl(cpu_ov, arg0, arg2);
    tcg_gen_xor_tl(t0, arg1, arg2);
    if (sub) {
        tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
    } else {
        tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
    }
    if (NARROW_MODE(ctx)) {
        /* 32-bit mode: overflow is in bit 31. */
        tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, cpu_ov);
        }
    } else {
        if (is_isa300(ctx)) {
            tcg_gen_extract_tl(cpu_ov32, cpu_ov, 31, 1);
        }
        tcg_gen_extract_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1, 1);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
1519
/* Compute CA32 (ISA 3.00 only) for res = arg0 +/- arg1: the carry out
   of bit 31 is recovered from operands and result by XOR. */
static inline void gen_op_arith_compute_ca32(DisasContext *ctx,
                                             TCGv res, TCGv arg0, TCGv arg1,
                                             TCGv ca32, int sub)
{
    TCGv t0;

    if (!is_isa300(ctx)) {
        /* CA32 only exists from ISA 3.00 on. */
        return;
    }

    t0 = tcg_temp_new();
    if (sub) {
        tcg_gen_eqv_tl(t0, arg0, arg1);
    } else {
        tcg_gen_xor_tl(t0, arg0, arg1);
    }
    tcg_gen_xor_tl(t0, t0, res);
    tcg_gen_extract_tl(ca32, t0, 32, 1);
}
1539
1540 /* Common add function */
1541 static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
1542 TCGv arg2, TCGv ca, TCGv ca32,
1543 bool add_ca, bool compute_ca,
1544 bool compute_ov, bool compute_rc0)
1545 {
1546 TCGv t0 = ret;
1547
1548 if (compute_ca || compute_ov) {
1549 t0 = tcg_temp_new();
1550 }
1551
1552 if (compute_ca) {
1553 if (NARROW_MODE(ctx)) {
1554 /*
1555 * Caution: a non-obvious corner case of the spec is that
1556 * we must produce the *entire* 64-bit addition, but
1557 * produce the carry into bit 32.
1558 */
1559 TCGv t1 = tcg_temp_new();
1560 tcg_gen_xor_tl(t1, arg1, arg2); /* add without carry */
1561 tcg_gen_add_tl(t0, arg1, arg2);
1562 if (add_ca) {
1563 tcg_gen_add_tl(t0, t0, ca);
1564 }
1565 tcg_gen_xor_tl(ca, t0, t1); /* bits changed w/ carry */
1566 tcg_gen_extract_tl(ca, ca, 32, 1);
1567 if (is_isa300(ctx)) {
1568 tcg_gen_mov_tl(ca32, ca);
1569 }
1570 } else {
1571 TCGv zero = tcg_const_tl(0);
1572 if (add_ca) {
1573 tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero);
1574 tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero);
1575 } else {
1576 tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero);
1577 }
1578 gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0);
1579 }
1580 } else {
1581 tcg_gen_add_tl(t0, arg1, arg2);
1582 if (add_ca) {
1583 tcg_gen_add_tl(t0, t0, ca);
1584 }
1585 }
1586
1587 if (compute_ov) {
1588 gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
1589 }
1590 if (unlikely(compute_rc0)) {
1591 gen_set_Rc0(ctx, t0);
1592 }
1593
1594 if (t0 != ret) {
1595 tcg_gen_mov_tl(ret, t0);
1596 }
1597 }
/* Add functions with two operands */
#define GEN_INT_ARITH_ADD(name, opc3, ca, add_ca, compute_ca, compute_ov)     \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     ca, glue(ca, 32),                                        \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}
/* Add functions with one operand and one immediate */
/* The immediate is only read, so use a cached constant, not a temp. */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, ca,                    \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_constant_tl(const_val);                                     \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], t0,                            \
                     ca, glue(ca, 32),                                        \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}
1618
/* add add. addo addo. */
GEN_INT_ARITH_ADD(add, 0x08, cpu_ca, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, cpu_ca, 0, 0, 1)
/* addc addc. addco addco. */
GEN_INT_ARITH_ADD(addc, 0x00, cpu_ca, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, cpu_ca, 0, 1, 1)
/* adde adde. addeo addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, cpu_ca, 1, 1, 1)
/* addme addme. addmeo addmeo. */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, cpu_ca, 1, 1, 1)
/* addex - note: carries in and out through OV rather than CA */
GEN_INT_ARITH_ADD(addex, 0x05, cpu_ov, 1, 1, 0);
/* addze addze. addzeo addzeo.*/
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1)
1636 /* addic addic.*/
1637 static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
1638 {
1639 TCGv c = tcg_const_tl(SIMM(ctx->opcode));
1640 gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
1641 c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0);
1642 }
1643
/* addic: add immediate carrying, CR0 untouched. */
static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, 0);
}
1648
/* addic.: add immediate carrying, also sets CR0. */
static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, 1);
}
1653
/* 32-bit divide: ret = arg1 / arg2. Divide-by-zero and (signed)
   INT_MIN / -1 are masked by forcing the divisor so the host op cannot
   trap; t2 records whether the operation overflowed. */
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 t3 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        /* t2 = (INT_MIN / -1) || (x / 0) -- the invalid cases. */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        /* Invalid case: replace divisor with 1 (t2) to avoid trapping. */
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    } else {
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    }
    if (compute_ov) {
        tcg_gen_extu_i32_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_extu_i32_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}
/* Div functions */
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divwu divwu. divwuo divwuo. */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw divw. divwo divwo. */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);
1707
1708 /* div[wd]eu[o][.] */
1709 #define GEN_DIVE(name, hlpr, compute_ov) \
1710 static void gen_##name(DisasContext *ctx) \
1711 { \
1712 TCGv_i32 t0 = tcg_const_i32(compute_ov); \
1713 gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env, \
1714 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
1715 if (unlikely(Rc(ctx->opcode) != 0)) { \
1716 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \
1717 } \
1718 }
1719
1720 GEN_DIVE(divweu, divweu, 0);
1721 GEN_DIVE(divweuo, divweu, 1);
1722 GEN_DIVE(divwe, divwe, 0);
1723 GEN_DIVE(divweo, divwe, 1);
1724
1725 #if defined(TARGET_PPC64)
/* 64-bit divide: ret = arg1 / arg2, masking divide-by-zero and
   INT64_MIN / -1 as in gen_op_arith_divw; t2 flags the invalid cases. */
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        /* t2 = (INT64_MIN / -1) || (x / 0) -- the invalid cases. */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        /* Invalid case: force divisor to 1 so the host op cannot trap. */
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i64(ret, t0, t1);
    } else {
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i64(ret, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_mov_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}
1763
/* 64-bit div wrappers, mirroring GEN_INT_ARITH_DIVW. */
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divdu divdu. divduo divduo. */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd divd. divdo divdo. */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
1782 #endif
1783
/* 32-bit modulo: ret = arg1 % arg2, masking divide-by-zero and
   (signed) INT_MIN % -1 so the host op cannot trap. */
static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        /* t2 = invalid cases: (INT_MIN % -1) || (x % 0). */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        /* Invalid case: force divisor to 1 (t2). */
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i32(t3, t0, t1);
        tcg_gen_ext_i32_tl(ret, t3);
    } else {
        TCGv_i32 t2 = tcg_const_i32(1);
        TCGv_i32 t3 = tcg_const_i32(0);
        /* Divisor 0 becomes 1; t3 is reused as the remainder output. */
        tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    }
}
1812
/* 32-bit modulo wrappers (ISA 3.00 modsw/moduw). */
#define GEN_INT_ARITH_MODW(name, opc3, sign)                                  \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign);                                                  \
}

GEN_INT_ARITH_MODW(moduw, 0x08, 0);
GEN_INT_ARITH_MODW(modsw, 0x18, 1);
1823
1824 #if defined(TARGET_PPC64)
/* 64-bit modulo: ret = arg1 % arg2, masking divide-by-zero and
   INT64_MIN % -1 as in gen_op_arith_modw. */
static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        TCGv_i64 t2 = tcg_temp_new_i64();
        TCGv_i64 t3 = tcg_temp_new_i64();
        /* t2 = invalid cases: (INT64_MIN % -1) || (x % 0). */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        /* Invalid case: force divisor to 1 (t2). */
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i64(ret, t0, t1);
    } else {
        TCGv_i64 t2 = tcg_const_i64(1);
        TCGv_i64 t3 = tcg_const_i64(0);
        /* Divisor 0 becomes 1. */
        tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i64(ret, t0, t1);
    }
}
1851
/* 64-bit modulo wrappers (ISA 3.00 modsd/modud). */
#define GEN_INT_ARITH_MODD(name, opc3, sign)                                  \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign);                                                  \
}

GEN_INT_ARITH_MODD(modud, 0x08, 0);
GEN_INT_ARITH_MODD(modsd, 0x18, 1);
1862 #endif
1863
/* mulhw mulhw. - high 32 bits of the signed 32x32 product */
static void gen_mulhw(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
    /* t1 holds the high half of the product. */
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
1878
/* mulhwu mulhwu. - high 32 bits of the unsigned 32x32 product */
static void gen_mulhwu(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mulu2_i32(t0, t1, t0, t1);
    /* t1 holds the high half of the product. */
    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
1893
/* mullw mullw. - low 32 bits of the product; on 64-bit targets the
   full 64-bit product of the sign-extended words is kept in rD. */
static void gen_mullw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    TCGv_i64 t0, t1;
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
#endif
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
1912
/* mullwo mullwo. - mullw with overflow: OV set when the signed 32x32
   product does not fit in 32 bits. */
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
#if defined(TARGET_PPC64)
    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
#endif

    /* Overflow iff the high half is not the sign-extension of the low. */
    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
1940
/* mulli - multiply by sign-extended immediate */
static void gen_mulli(DisasContext *ctx)
{
    tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    SIMM(ctx->opcode));
}
1947
1948 #if defined(TARGET_PPC64)
/* mulhd mulhd. - high 64 bits of the signed 64x64 product */
static void gen_mulhd(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();  /* low half, discarded */
    tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
1959
/* mulhdu mulhdu. - high 64 bits of the unsigned 64x64 product */
static void gen_mulhdu(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();  /* low half, discarded */
    tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
1970
/* mulld mulld. - low 64 bits of the product */
static void gen_mulld(DisasContext *ctx)
{
    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                   cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
1980
/* mulldo mulldo. - mulld with overflow: OV set when the signed 64x64
   product does not fit in 64 bits. */
static void gen_mulldo(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);

    /* Overflow iff the high half is not the sign-extension of the low. */
    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
2002 #endif
2003
2004 /* Common subf function */
2005 static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
2006 TCGv arg2, bool add_ca, bool compute_ca,
2007 bool compute_ov, bool compute_rc0)
2008 {
2009 TCGv t0 = ret;
2010
2011 if (compute_ca || compute_ov) {
2012 t0 = tcg_temp_new();
2013 }
2014
2015 if (compute_ca) {
2016 /* dest = ~arg1 + arg2 [+ ca]. */
2017 if (NARROW_MODE(ctx)) {
2018 /*
2019 * Caution: a non-obvious corner case of the spec is that
2020 * we must produce the *entire* 64-bit addition, but
2021 * produce the carry into bit 32.
2022 */
2023 TCGv inv1 = tcg_temp_new();
2024 TCGv t1 = tcg_temp_new();
2025 tcg_gen_not_tl(inv1, arg1);
2026 if (add_ca) {
2027 tcg_gen_add_tl(t0, arg2, cpu_ca);
2028 } else {
2029 tcg_gen_addi_tl(t0, arg2, 1);
2030 }
2031 tcg_gen_xor_tl(t1, arg2, inv1); /* add without carry */
2032 tcg_gen_add_tl(t0, t0, inv1);
2033 tcg_gen_xor_tl(cpu_ca, t0, t1); /* bits changes w/ carry */
2034 tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1);
2035 if (is_isa300(ctx)) {
2036 tcg_gen_mov_tl(cpu_ca32, cpu_ca);
2037 }
2038 } else if (add_ca) {
2039 TCGv zero, inv1 = tcg_temp_new();
2040 tcg_gen_not_tl(inv1, arg1);
2041 zero = tcg_const_tl(0);
2042 tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
2043 tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
2044 gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0);
2045 } else {
2046 tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
2047 tcg_gen_sub_tl(t0, arg2, arg1);
2048 gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1);
2049 }
2050 } else if (add_ca) {
2051 /*
2052 * Since we're ignoring carry-out, we can simplify the
2053 * standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1.
2054 */
2055 tcg_gen_sub_tl(t0, arg2, arg1);
2056 tcg_gen_add_tl(t0, t0, cpu_ca);
2057 tcg_gen_subi_tl(t0, t0, 1);
2058 } else {
2059 tcg_gen_sub_tl(t0, arg2, arg1);
2060 }
2061
2062 if (compute_ov) {
2063 gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
2064 }
2065 if (unlikely(compute_rc0)) {
2066 gen_set_Rc0(ctx, t0);
2067 }
2068
2069 if (t0 != ret) {
2070 tcg_gen_mov_tl(ret, t0);
2071 }
2072 }
/* Sub functions with Two operands functions */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* Sub functions with one operand and one immediate */
/* The immediate is only read, so use a cached constant, not a temp. */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                 add_ca, compute_ca, compute_ov)              \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_constant_tl(const_val);                                     \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], t0,                           \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* subf subf. subfo subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc subfc. subfco subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe subfe. subfeo subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme subfme. subfmeo subfmeo. */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze subfze. subfzeo subfzeo.*/
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
2106
2107 /* subfic */
2108 static void gen_subfic(DisasContext *ctx)
2109 {
2110 TCGv c = tcg_const_tl(SIMM(ctx->opcode));
2111 gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2112 c, 0, 1, 0, 0);
2113 }
2114
2115 /* neg neg. nego nego. */
2116 static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
2117 {
2118 TCGv zero = tcg_const_tl(0);
2119 gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2120 zero, 0, 0, compute_ov, Rc(ctx->opcode));
2121 }
2122
/* neg/neg.: no overflow tracking needed, so use a plain negate. */
static void gen_neg(DisasContext *ctx)
{
    tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
2130
/* nego/nego.: negate with overflow tracking. */
static void gen_nego(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 1);
}
2135
2136 /*** Integer logical ***/
/*** Integer logical ***/
/* Two-source logical op: rA = rS <op> rB, optionally setting CR0.
   Braces added to the if bodies per QEMU coding style. */
#define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],                \
           cpu_gpr[rB(ctx->opcode)]);                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
    }                                                                         \
}

/* One-source logical op: rA = <op>(rS), optionally setting CR0. */
#define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
    }                                                                         \
}
2153
2154 /* and & and. */
2155 GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
2156 /* andc & andc. */
2157 GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);
2158
2159 /* andi. */
2160 static void gen_andi_(DisasContext *ctx)
2161 {
2162 tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2163 UIMM(ctx->opcode));
2164 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2165 }
2166
2167 /* andis. */
2168 static void gen_andis_(DisasContext *ctx)
2169 {
2170 tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2171 UIMM(ctx->opcode) << 16);
2172 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2173 }
2174
2175 /* cntlzw */
2176 static void gen_cntlzw(DisasContext *ctx)
2177 {
2178 TCGv_i32 t = tcg_temp_new_i32();
2179
2180 tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
2181 tcg_gen_clzi_i32(t, t, 32);
2182 tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
2183
2184 if (unlikely(Rc(ctx->opcode) != 0)) {
2185 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2186 }
2187 }
2188
2189 /* cnttzw */
2190 static void gen_cnttzw(DisasContext *ctx)
2191 {
2192 TCGv_i32 t = tcg_temp_new_i32();
2193
2194 tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
2195 tcg_gen_ctzi_i32(t, t, 32);
2196 tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
2197
2198 if (unlikely(Rc(ctx->opcode) != 0)) {
2199 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2200 }
2201 }
2202
2203 /* eqv & eqv. */
2204 GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
2205 /* extsb & extsb. */
2206 GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
2207 /* extsh & extsh. */
2208 GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
2209 /* nand & nand. */
2210 GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
2211 /* nor & nor. */
2212 GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
2213
2214 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
static void gen_pause(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_const_i32(0);
    /*
     * cpu_env points at the CPUPPCState embedded inside PowerPCCPU;
     * the negative offset reaches back to the enclosing CPUState so we
     * can store directly to its 'halted' field.
     */
    tcg_gen_st_i32(t0, cpu_env,
                   -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));

    /* Stop translation, this gives other CPUs a chance to run */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
}
2224 #endif /* defined(TARGET_PPC64) */
2225
2226 /* or & or. */
/*
 * or & or. — also decodes "or rX,rX,rX", which architecturally is a
 * no-op but encodes thread-priority hints via the PPR SPR on PPC64.
 */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for mr. ri case; rs == ra == rb selects the hints below */
    if (rs != ra || rs != rb) {
        if (rs != rb) {
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        } else {
            /* or rA,rS,rS is a register move (mr) */
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        }
        if (unlikely(Rc(ctx->opcode) != 0)) {
            gen_set_Rc0(ctx, cpu_gpr[ra]);
        }
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else if (rs != 0) { /* 0 is nop */
        int prio = 0;

        /* Map the register number to the architected priority level */
        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (!ctx->pr) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (!ctx->pr) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (!ctx->pr) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->hv && !ctx->pr) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            break;
        }
        if (prio) {
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            /* Replace the 3-bit priority field at PPR bits 52:50 */
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
        }
#if !defined(CONFIG_USER_ONLY)
        /*
         * Pause out of TCG otherwise spin loops with smt_low eat too
         * much CPU and the kernel hangs. This applies to all
         * encodings other than no-op, e.g., miso(rs=26), yield(27),
         * mdoio(29), mdoom(30), and all currently undefined.
         */
        gen_pause(ctx);
#endif
#endif
    }
}
2311 /* orc & orc. */
2312 GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
2313
2314 /* xor & xor. */
2315 static void gen_xor(DisasContext *ctx)
2316 {
2317 /* Optimisation for "set to zero" case */
2318 if (rS(ctx->opcode) != rB(ctx->opcode)) {
2319 tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2320 cpu_gpr[rB(ctx->opcode)]);
2321 } else {
2322 tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
2323 }
2324 if (unlikely(Rc(ctx->opcode) != 0)) {
2325 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2326 }
2327 }
2328
2329 /* ori */
2330 static void gen_ori(DisasContext *ctx)
2331 {
2332 target_ulong uimm = UIMM(ctx->opcode);
2333
2334 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2335 return;
2336 }
2337 tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
2338 }
2339
2340 /* oris */
2341 static void gen_oris(DisasContext *ctx)
2342 {
2343 target_ulong uimm = UIMM(ctx->opcode);
2344
2345 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2346 /* NOP */
2347 return;
2348 }
2349 tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2350 uimm << 16);
2351 }
2352
2353 /* xori */
2354 static void gen_xori(DisasContext *ctx)
2355 {
2356 target_ulong uimm = UIMM(ctx->opcode);
2357
2358 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2359 /* NOP */
2360 return;
2361 }
2362 tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
2363 }
2364
2365 /* xoris */
2366 static void gen_xoris(DisasContext *ctx)
2367 {
2368 target_ulong uimm = UIMM(ctx->opcode);
2369
2370 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2371 /* NOP */
2372 return;
2373 }
2374 tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2375 uimm << 16);
2376 }
2377
2378 /* popcntb : PowerPC 2.03 specification */
/* Per-byte population count of rS into rA, done in a helper. */
static void gen_popcntb(DisasContext *ctx)
{
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
2383
/* popcntw: per-32-bit-word population count of rS into rA. */
static void gen_popcntw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#else
    /* 32-bit target: the register is a single word, plain ctpop works */
    tcg_gen_ctpop_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#endif
}
2392
2393 #if defined(TARGET_PPC64)
2394 /* popcntd: PowerPC 2.06 specification */
/* popcntd: PowerPC 2.06 specification */
/* Full-doubleword population count of rS into rA. */
static void gen_popcntd(DisasContext *ctx)
{
    tcg_gen_ctpop_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
2399 #endif
2400
2401 /* prtyw: PowerPC 2.05 specification */
/*
 * prtyw: XOR-fold the byte parity bits of each word down to bit 0.
 * Note ra may alias rs, so rs is only read before ra is first written.
 */
static void gen_prtyw(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 16);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    /* keep only bit 0 of each 32-bit word */
    tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
}
2413
2414 #if defined(TARGET_PPC64)
2415 /* prtyd: PowerPC 2.05 specification */
/*
 * prtyd: XOR-fold the byte parity bits of the doubleword down to bit 0.
 * Note ra may alias rs, so rs is only read before ra is first written.
 */
static void gen_prtyd(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, 1);
}
2429 #endif
2430
2431 #if defined(TARGET_PPC64)
2432 /* bpermd */
/* Bit-permute doubleword: gather/permute done entirely in the helper. */
static void gen_bpermd(DisasContext *ctx)
{
    gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
2438 #endif
2439
2440 #if defined(TARGET_PPC64)
2441 /* extsw & extsw. */
2442 GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
2443
2444 /* cntlzd */
2445 static void gen_cntlzd(DisasContext *ctx)
2446 {
2447 tcg_gen_clzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
2448 if (unlikely(Rc(ctx->opcode) != 0)) {
2449 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2450 }
2451 }
2452
2453 /* cnttzd */
2454 static void gen_cnttzd(DisasContext *ctx)
2455 {
2456 tcg_gen_ctzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
2457 if (unlikely(Rc(ctx->opcode) != 0)) {
2458 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2459 }
2460 }
2461
2462 /* darn */
/* darn: deliver a random number; L selects the conditioning/width. */
static void gen_darn(DisasContext *ctx)
{
    int l = L(ctx->opcode);

    /* L > 2 is reserved: return all-ones rather than a random value */
    if (l > 2) {
        tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
    } else {
        gen_icount_io_start(ctx);
        if (l == 0) {
            /* 32-bit conditioned random number */
            gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
        } else {
            /* Return 64-bit random for both CRN and RRN */
            gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
        }
    }
}
2479 #endif
2480
2481 /*** Integer rotate ***/
2482
2483 /* rlwimi & rlwimi. */
/* Rotate rS left by sh, insert into rA under the MASK(mb, me) bits. */
static void gen_rlwimi(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    if (sh == (31 - me) && mb <= me) {
        /* Contiguous mask exactly aligned with the rotation: one deposit */
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
        TCGv t1;

#if defined(TARGET_PPC64)
        /* MASK() numbers bits MSB-first; word data lives in the low 32 bits */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);

#if defined(TARGET_PPC64)
        /* mb > me wraps the mask around into the high word */
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        t1 = tcg_temp_new();
        if (mask_in_32b) {
            TCGv_i32 t0 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_extu_i32_tl(t1, t0);
        } else {
#if defined(TARGET_PPC64)
            /* Replicate the word so a 64-bit rotate acts like a 32-bit one */
            tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t1, t1, sh);
#else
            g_assert_not_reached();
#endif
        }

        /* Merge: rotated bits under the mask, original rA elsewhere */
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2533
2534 /* rlwinm & rlwinm. */
/* Rotate rS left by the immediate sh, AND with MASK(mb, me), into rA. */
static void gen_rlwinm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int sh = SH(ctx->opcode);
    int mb = MB(ctx->opcode);
    int me = ME(ctx->opcode);
    int len = me - mb + 1;
    int rsh = (32 - sh) & 31;

    if (sh != 0 && len > 0 && me == (31 - sh)) {
        /* Left-aligned field: shift-into-zeros via deposit */
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 31 && rsh + len <= 32) {
        /* Right-aligned field: plain bitfield extract */
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
#if defined(TARGET_PPC64)
        /* MASK() numbers bits MSB-first; word data lives in the low 32 bits */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);
#if defined(TARGET_PPC64)
        /* mb > me wraps the mask around into the high word */
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        if (mask_in_32b) {
            if (sh == 0) {
                /* No rotation: a simple AND */
                tcg_gen_andi_tl(t_ra, t_rs, mask);
            } else {
                TCGv_i32 t0 = tcg_temp_new_i32();
                tcg_gen_trunc_tl_i32(t0, t_rs);
                tcg_gen_rotli_i32(t0, t0, sh);
                tcg_gen_andi_i32(t0, t0, mask);
                tcg_gen_extu_i32_tl(t_ra, t0);
            }
        } else {
#if defined(TARGET_PPC64)
            /* Replicate the word so a 64-bit rotate acts like a 32-bit one */
            tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t_ra, t_ra, sh);
            tcg_gen_andi_i64(t_ra, t_ra, mask);
#else
            g_assert_not_reached();
#endif
        }
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2586
2587 /* rlwnm & rlwnm. */
/* Rotate rS left by the low 5 bits of rB, AND with MASK(mb, me), into rA. */
static void gen_rlwnm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);
    target_ulong mask;
    bool mask_in_32b = true;

#if defined(TARGET_PPC64)
    /* MASK() numbers bits MSB-first; word data lives in the low 32 bits */
    mb += 32;
    me += 32;
#endif
    mask = MASK(mb, me);

#if defined(TARGET_PPC64)
    /* mb > me wraps the mask around into the high word */
    if (mask > 0xffffffffu) {
        mask_in_32b = false;
    }
#endif
    if (mask_in_32b) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, t_rb);
        tcg_gen_trunc_tl_i32(t1, t_rs);
        /* Only the low 5 bits of rB form the rotate count */
        tcg_gen_andi_i32(t0, t0, 0x1f);
        tcg_gen_rotl_i32(t1, t1, t0);
        tcg_gen_extu_i32_tl(t_ra, t1);
    } else {
#if defined(TARGET_PPC64)
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_andi_i64(t0, t_rb, 0x1f);
        /* Replicate the word so a 64-bit rotate acts like a 32-bit one */
        tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
        tcg_gen_rotl_i64(t_ra, t_ra, t0);
#else
        g_assert_not_reached();
#endif
    }

    tcg_gen_andi_tl(t_ra, t_ra, mask);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2634
2635 #if defined(TARGET_PPC64)
/* Expand gen_<name>(ctx, n) into its two single-bit subopcode variants. */
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0);                                                       \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1);                                                       \
}
/* Expand gen_<name>(ctx, a, b) into its four two-bit subopcode variants. */
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 1);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##2)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##3)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 1);                                                    \
}
2666
/* Common helper for rldicl/rldicr/rldic: rotate by sh, AND with MASK(mb,me). */
static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int len = me - mb + 1;
    int rsh = (64 - sh) & 63;

    if (sh != 0 && len > 0 && me == (63 - sh)) {
        /* Left-aligned field: shift-into-zeros via deposit */
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 63 && rsh + len <= 64) {
        /* Right-aligned field: plain bitfield extract */
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        tcg_gen_rotli_tl(t_ra, t_rs, sh);
        tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2686
2687 /* rldicl - rldicl. */
/* rldicl: clear left — mask runs from mb through bit 63. */
static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    /* shn/mbn carry bit 5 of the split 6-bit sh/mb fields */
    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63, sh);
}
GEN_PPC64_R4(rldicl, 0x1E, 0x00);
2697
2698 /* rldicr - rldicr. */
/* rldicr: clear right — mask runs from bit 0 through me. */
static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
{
    uint32_t sh, me;

    sh = SH(ctx->opcode) | (shn << 5);
    /* MB() extracts the opcode bits that hold the me field in this form */
    me = MB(ctx->opcode) | (men << 5);
    gen_rldinm(ctx, 0, me, sh);
}
GEN_PPC64_R4(rldicr, 0x1E, 0x02);
2708
2709 /* rldic - rldic. */
/* rldic: rotate and clear — mask runs from mb through 63 - sh. */
static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
{
    uint32_t sh, mb;

    sh = SH(ctx->opcode) | (shn << 5);
    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldinm(ctx, mb, 63 - sh, sh);
}
GEN_PPC64_R4(rldic, 0x1E, 0x04);
2719
/* Common helper for rldcl/rldcr: rotate by rB, AND with MASK(mb, me). */
static void gen_rldnm(DisasContext *ctx, int mb, int me)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    TCGv t0;

    t0 = tcg_temp_new();
    /* Only the low 6 bits of rB form the rotate count */
    tcg_gen_andi_tl(t0, t_rb, 0x3f);
    tcg_gen_rotl_tl(t_ra, t_rs, t0);

    tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2736
2737 /* rldcl - rldcl. */
/* rldcl: rotate by rB, clear left — mask runs from mb through bit 63. */
static inline void gen_rldcl(DisasContext *ctx, int mbn)
{
    uint32_t mb;

    mb = MB(ctx->opcode) | (mbn << 5);
    gen_rldnm(ctx, mb, 63);
}
GEN_PPC64_R2(rldcl, 0x1E, 0x08);
2746
2747 /* rldcr - rldcr. */
/* rldcr: rotate by rB, clear right — mask runs from bit 0 through me. */
static inline void gen_rldcr(DisasContext *ctx, int men)
{
    uint32_t me;

    /* MB() extracts the opcode bits that hold the me field in this form */
    me = MB(ctx->opcode) | (men << 5);
    gen_rldnm(ctx, 0, me);
}
GEN_PPC64_R2(rldcr, 0x1E, 0x09);
2756
2757 /* rldimi - rldimi. */
/* Rotate rS left by sh, insert into rA under MASK(mb, 63 - sh). */
static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode) | (shn << 5);
    uint32_t mb = MB(ctx->opcode) | (mbn << 5);
    uint32_t me = 63 - sh;

    if (mb <= me) {
        /* Contiguous mask aligned with the rotation: one deposit */
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        /* Wrap-around mask: rotate, then merge under the mask */
        target_ulong mask = MASK(mb, me);
        TCGv t1 = tcg_temp_new();

        tcg_gen_rotli_tl(t1, t_rs, sh);
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
2782 #endif
2783
2784 /*** Integer shift ***/
2785
2786 /* slw & slw. */
static void gen_slw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    /* Move shift-count bit 5 into the sign bit, then smear it */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    /* Shift by the low 5 bits only; larger counts were zeroed above */
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    /* Word op: clear the upper half of the result */
    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2809
2810 /* sraw & sraw. */
2811 static void gen_sraw(DisasContext *ctx)
2812 {
2813 gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
2814 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2815 if (unlikely(Rc(ctx->opcode) != 0)) {
2816 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2817 }
2818 }
2819
2820 /* srawi & srawi. */
static void gen_srawi(DisasContext *ctx)
{
    int sh = SH(ctx->opcode);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Shift by zero: just sign-extend; CA (and CA32) are cleared */
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        tcg_gen_ext32s_tl(dst, src);
        /* CA = source negative AND at least one 1-bit shifted out */
        tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        /* Collapse the masked bits to a 0/1 flag value */
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, dst, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
2849
2850 /* srw & srw. */
static void gen_srw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    /* Move shift-count bit 5 into the sign bit, then smear it */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    /* Word op: shift only the low 32 bits, zero-extended */
    tcg_gen_ext32u_tl(t0, t0);
    t1 = tcg_temp_new();
    /* Shift by the low 5 bits only; larger counts were zeroed above */
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2873
2874 #if defined(TARGET_PPC64)
2875 /* sld & sld. */
static void gen_sld(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    /* Move shift-count bit 6 into the sign bit, then smear it */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    /* Shift by the low 6 bits only; larger counts were zeroed above */
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2892
2893 /* srad & srad. */
2894 static void gen_srad(DisasContext *ctx)
2895 {
2896 gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
2897 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2898 if (unlikely(Rc(ctx->opcode) != 0)) {
2899 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2900 }
2901 }
2902 /* sradi & sradi. */
static inline void gen_sradi(DisasContext *ctx, int n)
{
    /* n supplies bit 5 of the split 6-bit shift amount */
    int sh = SH(ctx->opcode) + (n << 5);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Shift by zero: plain move; CA (and CA32) are cleared */
        tcg_gen_mov_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        /* CA = source negative AND at least one 1-bit shifted out */
        tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        /* Collapse the masked bits to a 0/1 flag value */
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, src, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
2930
/* sradi variant with sh bit 5 = 0 */
static void gen_sradi0(DisasContext *ctx)
{
    gen_sradi(ctx, 0);
}
2935
/* sradi variant with sh bit 5 = 1 */
static void gen_sradi1(DisasContext *ctx)
{
    gen_sradi(ctx, 1);
}
2940
2941 /* extswsli & extswsli. */
2942 static inline void gen_extswsli(DisasContext *ctx, int n)
2943 {
2944 int sh = SH(ctx->opcode) + (n << 5);
2945 TCGv dst = cpu_gpr[rA(ctx->opcode)];
2946 TCGv src = cpu_gpr[rS(ctx->opcode)];
2947
2948 tcg_gen_ext32s_tl(dst, src);
2949 tcg_gen_shli_tl(dst, dst, sh);
2950 if (unlikely(Rc(ctx->opcode) != 0)) {
2951 gen_set_Rc0(ctx, dst);
2952 }
2953 }
2954
/* extswsli variant with sh bit 5 = 0 */
static void gen_extswsli0(DisasContext *ctx)
{
    gen_extswsli(ctx, 0);
}
2959
/* extswsli variant with sh bit 5 = 1 */
static void gen_extswsli1(DisasContext *ctx)
{
    gen_extswsli(ctx, 1);
}
2964
2965 /* srd & srd. */
static void gen_srd(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    /* Move shift-count bit 6 into the sign bit, then smear it */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    /* Shift by the low 6 bits only; larger counts were zeroed above */
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2982 #endif
2983
2984 /*** Addressing modes ***/
2985 /* Register indirect with immediate index : EA = (rA|0) + SIMM */
static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
                                      target_long maskl)
{
    target_long simm = SIMM(ctx->opcode);

    /* maskl clears low displacement bits (e.g. DS/DQ-form alignment) */
    simm &= ~maskl;
    if (rA(ctx->opcode) == 0) {
        /* (rA|0) with rA == 0: the EA is just the displacement */
        if (NARROW_MODE(ctx)) {
            /* 32-bit mode: effective addresses wrap at 4 GiB */
            simm = (uint32_t)simm;
        }
        tcg_gen_movi_tl(EA, simm);
    } else if (likely(simm != 0)) {
        tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, EA);
        }
    } else {
        /* Zero displacement: copy (and possibly truncate) the base */
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        } else {
            tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        }
    }
}
3010
3011 static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
3012 {
3013 if (rA(ctx->opcode) == 0) {
3014 if (NARROW_MODE(ctx)) {
3015 tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
3016 } else {
3017 tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
3018 }
3019 } else {
3020 tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
3021 if (NARROW_MODE(ctx)) {
3022 tcg_gen_ext32u_tl(EA, EA);
3023 }
3024 }
3025 }
3026
3027 static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
3028 {
3029 if (rA(ctx->opcode) == 0) {
3030 tcg_gen_movi_tl(EA, 0);
3031 } else if (NARROW_MODE(ctx)) {
3032 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3033 } else {
3034 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3035 }
3036 }
3037
/* ret = arg1 + val as an effective address (32-bit wrap in narrow mode). */
static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                target_long val)
{
    tcg_gen_addi_tl(ret, arg1, val);
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(ret, ret);
    }
}
3046
/* Raise an alignment interrupt for insns that have no little-endian form. */
static inline void gen_align_no_le(DisasContext *ctx)
{
    gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
                      (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
}
3052
/* Compute EA = (ra ? GPR[ra] : 0) + displ into a freshly allocated temp. */
static TCGv do_ea_calc(DisasContext *ctx, int ra, TCGv displ)
{
    TCGv ea = tcg_temp_new();
    if (ra) {
        tcg_gen_add_tl(ea, cpu_gpr[ra], displ);
    } else {
        tcg_gen_mov_tl(ea, displ);
    }
    if (NARROW_MODE(ctx)) {
        /* 32-bit mode: effective addresses wrap at 4 GiB */
        tcg_gen_ext32u_tl(ea, ea);
    }
    return ea;
}
3066
3067 /*** Integer load ***/
3068 #define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
3069 #define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))
3070
/* Define gen_qemu_<ldop>(): load a target-long value from guest addr. */
#define GEN_QEMU_LOAD_TL(ldop, op)                                      \
static void glue(gen_qemu_, ldop)(DisasContext *ctx,                    \
                                  TCGv val,                             \
                                  TCGv addr)                            \
{                                                                       \
    tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op);                    \
}
3078
3079 GEN_QEMU_LOAD_TL(ld8u, DEF_MEMOP(MO_UB))
3080 GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
3081 GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
3082 GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
3083 GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))
3084
3085 GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
3086 GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))
3087
/* Define gen_qemu_<ldop>_i64(): load a 64-bit value from guest addr. */
#define GEN_QEMU_LOAD_64(ldop, op)                                      \
static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx,        \
                                              TCGv_i64 val,             \
                                              TCGv addr)                \
{                                                                       \
    tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op);                   \
}
3095
3096 GEN_QEMU_LOAD_64(ld8u, DEF_MEMOP(MO_UB))
3097 GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
3098 GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
3099 GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
3100 GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_UQ))
3101
3102 #if defined(TARGET_PPC64)
3103 GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_UQ))
3104 #endif
3105
/* Define gen_qemu_<stop>(): store a target-long value to guest addr. */
#define GEN_QEMU_STORE_TL(stop, op)                                     \
static void glue(gen_qemu_, stop)(DisasContext *ctx,                    \
                                  TCGv val,                             \
                                  TCGv addr)                            \
{                                                                       \
    tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op);                    \
}
3113
3114 #if defined(TARGET_PPC64) || !defined(CONFIG_USER_ONLY)
3115 GEN_QEMU_STORE_TL(st8, DEF_MEMOP(MO_UB))
3116 #endif
3117 GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
3118 GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))
3119
3120 GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
3121 GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))
3122
/* Define gen_qemu_<stop>_i64(): store a 64-bit value to guest addr. */
#define GEN_QEMU_STORE_64(stop, op)                                     \
static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx,        \
                                              TCGv_i64 val,             \
                                              TCGv addr)                \
{                                                                       \
    tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op);                   \
}
3130
3131 GEN_QEMU_STORE_64(st8, DEF_MEMOP(MO_UB))
3132 GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
3133 GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
3134 GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_UQ))
3135
3136 #if defined(TARGET_PPC64)
3137 GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_UQ))
3138 #endif
3139
/* Define gen_<name>x(): indexed load into rD, guarded by access check chk. */
#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk)                   \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    chk(ctx);                                                                 \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
}

/* Unchecked indexed load */
#define GEN_LDX(name, ldop, opc2, opc3, type)                                 \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_NONE)

/* Hypervisor-real-mode-only indexed load (cache-inhibited variants) */
#define GEN_LDX_HVRM(name, ldop, opc2, opc3, type)                            \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_HVRM)

/* External-PID load: privileged, uses the EPID load mmu index. */
#define GEN_LDEPX(name, ldop, opc2, opc3)                                     \
static void glue(gen_, name##epx)(DisasContext *ctx)                          \
{                                                                             \
    TCGv EA;                                                                  \
    CHK_SV(ctx);                                                              \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_LOAD, ldop);\
}
3167
3168 GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
3169 GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
3170 GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
3171 #if defined(TARGET_PPC64)
3172 GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
3173 #endif
3174
3175 #if defined(TARGET_PPC64)
3176 /* CI load/store variants */
3177 GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
3178 GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
3179 GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
3180 GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
3181 #endif
3182
3183 /*** Integer store ***/
/* Define gen_<name>x(): indexed store from rS, guarded by access check chk. */
#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk)                   \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    chk(ctx);                                                                 \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
}
/* Unchecked indexed store */
#define GEN_STX(name, stop, opc2, opc3, type)                                 \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE)

/* Hypervisor-real-mode-only indexed store (cache-inhibited variants) */
#define GEN_STX_HVRM(name, stop, opc2, opc3, type)                            \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_HVRM)

/*
 * External-PID store: privileged, uses the EPID store mmu index.
 * NOTE(review): rD() is used for the store source; it appears to extract
 * the same opcode field as rS() — confirm against the field macros.
 */
#define GEN_STEPX(name, stop, opc2, opc3)                                     \
static void glue(gen_, name##epx)(DisasContext *ctx)                          \
{                                                                             \
    TCGv EA;                                                                  \
    CHK_SV(ctx);                                                              \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_qemu_st_tl(                                                       \
        cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_STORE, stop);              \
}
3211
3212 GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
3213 GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
3214 GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
3215 #if defined(TARGET_PPC64)
3216 GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1d, 0x04)
3217 #endif
3218
3219 #if defined(TARGET_PPC64)
3220 GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
3221 GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
3222 GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
3223 GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
3224 #endif
3225 /*** Integer load and store with byte reverse ***/
3226
3227 /* lhbrx */
3228 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
3229
3230 /* lwbrx */
3231 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
3232
3233 #if defined(TARGET_PPC64)
3234 /* ldbrx */
3235 GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
3236 /* stdbrx */
3237 GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
3238 #endif /* TARGET_PPC64 */
3239
3240 /* sthbrx */
3241 GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
3242 /* stwbrx */
3243 GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
3244
3245 /*** Integer load and store multiple ***/
3246
3247 /* lmw */
static void gen_lmw(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1;

    /* lmw has no little-endian form */
    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    t1 = tcg_const_i32(rD(ctx->opcode));
    gen_addr_imm_index(ctx, t0, 0);
    /* The multi-register load itself is done in the helper */
    gen_helper_lmw(cpu_env, t0, t1);
}
3263
3264 /* stmw */
static void gen_stmw(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1;

    /* stmw has no little-endian form */
    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    t1 = tcg_const_i32(rS(ctx->opcode));
    gen_addr_imm_index(ctx, t0, 0);
    /* The multi-register store itself is done in the helper */
    gen_helper_stmw(cpu_env, t0, t1);
}
3280
3281 /*** Integer load and store strings ***/
3282
3283 /* lswi */
/*
 * The PowerPC32 specification says we must generate an exception if rA
 * is in the range of registers to be loaded. On the other hand, IBM
 * says this is valid, but rA won't be loaded. For now, follow the
 * spec...
 */
3290 static void gen_lswi(DisasContext *ctx)
3291 {
3292 TCGv t0;
3293 TCGv_i32 t1, t2;
3294 int nb = NB(ctx->opcode);
3295 int start = rD(ctx->opcode);
3296 int ra = rA(ctx->opcode);
3297 int nr;
3298
3299 if (ctx->le_mode) {
3300 gen_align_no_le(ctx);
3301 return;
3302 }
3303 if (nb == 0) {
3304 nb = 32;
3305 }
3306 nr = DIV_ROUND_UP(nb, 4);
3307 if (unlikely(lsw_reg_in_range(start, nr, ra))) {
3308 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
3309 return;
3310 }
3311 gen_set_access_type(ctx, ACCESS_INT);
3312 t0 = tcg_temp_new();
3313 gen_addr_register(ctx, t0);
3314 t1 = tcg_const_i32(nb);
3315 t2 = tcg_const_i32(start);
3316 gen_helper_lsw(cpu_env, t0, t1, t2);
3317 }
3318
3319 /* lswx */
3320 static void gen_lswx(DisasContext *ctx)
3321 {
3322 TCGv t0;
3323 TCGv_i32 t1, t2, t3;
3324
3325 if (ctx->le_mode) {
3326 gen_align_no_le(ctx);
3327 return;
3328 }
3329 gen_set_access_type(ctx, ACCESS_INT);
3330 t0 = tcg_temp_new();
3331 gen_addr_reg_index(ctx, t0);
3332 t1 = tcg_const_i32(rD(ctx->opcode));
3333 t2 = tcg_const_i32(rA(ctx->opcode));
3334 t3 = tcg_const_i32(rB(ctx->opcode));
3335 gen_helper_lswx(cpu_env, t0, t1, t2, t3);
3336 }
3337
3338 /* stswi */
3339 static void gen_stswi(DisasContext *ctx)
3340 {
3341 TCGv t0;
3342 TCGv_i32 t1, t2;
3343 int nb = NB(ctx->opcode);
3344
3345 if (ctx->le_mode) {
3346 gen_align_no_le(ctx);
3347 return;
3348 }
3349 gen_set_access_type(ctx, ACCESS_INT);
3350 t0 = tcg_temp_new();
3351 gen_addr_register(ctx, t0);
3352 if (nb == 0) {
3353 nb = 32;
3354 }
3355 t1 = tcg_const_i32(nb);
3356 t2 = tcg_const_i32(rS(ctx->opcode));
3357 gen_helper_stsw(cpu_env, t0, t1, t2);
3358 }
3359
3360 /* stswx */
3361 static void gen_stswx(DisasContext *ctx)
3362 {
3363 TCGv t0;
3364 TCGv_i32 t1, t2;
3365
3366 if (ctx->le_mode) {
3367 gen_align_no_le(ctx);
3368 return;
3369 }
3370 gen_set_access_type(ctx, ACCESS_INT);
3371 t0 = tcg_temp_new();
3372 gen_addr_reg_index(ctx, t0);
3373 t1 = tcg_temp_new_i32();
3374 tcg_gen_trunc_tl_i32(t1, cpu_xer);
3375 tcg_gen_andi_i32(t1, t1, 0x7F);
3376 t2 = tcg_const_i32(rS(ctx->opcode));
3377 gen_helper_stsw(cpu_env, t0, t1, t2);
3378 }
3379
3380 /*** Memory synchronisation ***/
/* eieio */
static void gen_eieio(DisasContext *ctx)
{
    TCGBar bar = TCG_MO_ALL;

    /*
     * eieio has complex semantics. It provides memory ordering between
     * operations in the set:
     * - loads from CI memory.
     * - stores to CI memory.
     * - stores to WT memory.
     *
     * It separately also orders memory for operations in the set:
     * - stores to cacheable memory.
     *
     * It also serializes instructions:
     * - dcbt and dcbst.
     *
     * It separately serializes:
     * - tlbie and tlbsync.
     *
     * And separately serializes:
     * - slbieg, slbiag, and slbsync.
     *
     * The end result is that CI memory ordering requires TCG_MO_ALL
     * and it is not possible to special-case more relaxed ordering for
     * cacheable accesses. TCG_BAR_SC is required to provide this
     * serialization.
     */

    /*
     * POWER9 has a eieio instruction variant using bit 6 as a hint to
     * tell the CPU it is a store-forwarding barrier.
     */
    if (ctx->opcode & 0x2000000) {
        /*
         * ISA says that "Reserved fields in instructions are ignored
         * by the processor". So ignore the bit 6 on non-POWER9 CPU but
         * as this is not an instruction software should be using,
         * complain to the user.
         */
        if (!(ctx->insns_flags2 & PPC2_ISA300)) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid eieio using bit 6 at @"
                          TARGET_FMT_lx "\n", ctx->cia);
        } else {
            bar = TCG_MO_ST_LD;
        }
    }

    tcg_gen_mb(bar | TCG_BAR_SC);
}
3432
#if !defined(CONFIG_USER_ONLY)
/*
 * Emit code that performs a pending lazy TLB flush, if any.  Reads
 * env->tlb_need_flush at run time and calls the global or local flush
 * helper accordingly; "global" selects the flush scope.
 */
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
{
    TCGv_i32 t;
    TCGLabel *l;

    /* Nothing to do on CPUs that do not defer TLB flushes. */
    if (!ctx->lazy_tlb_flush) {
        return;
    }
    l = gen_new_label();
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
    /* Skip the helper call entirely when no flush is pending. */
    tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
    if (global) {
        gen_helper_check_tlb_flush_global(cpu_env);
    } else {
        gen_helper_check_tlb_flush_local(cpu_env);
    }
    gen_set_label(l);
}
#else
/* User mode has no TLB to flush. */
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
#endif
3456
/* isync */
static void gen_isync(DisasContext *ctx)
{
    /*
     * We need to check for a pending TLB flush. This can only happen in
     * kernel mode however so check MSR_PR
     */
    if (!ctx->pr) {
        gen_check_tlb_flush(ctx, false);
    }
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    /* isync is context-synchronizing: end the TB and re-read CPU state. */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}
3470
3471 #define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE))
3472
/*
 * Common code for load-and-reserve (larx): load into rD, then record the
 * reservation address and loaded value for the matching stcx.
 */
static void gen_load_locked(DisasContext *ctx, MemOp memop)
{
    TCGv gpr = cpu_gpr[rD(ctx->opcode)];
    TCGv t0 = tcg_temp_new();

    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN);
    tcg_gen_mov_tl(cpu_reserve, t0);
    tcg_gen_mov_tl(cpu_reserve_val, gpr);
    /* Acquire ordering for the load-reserve. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
}
3485
/* Expand a load-and-reserve instruction of the given width. */
#define LARX(name, memop)                  \
static void gen_##name(DisasContext *ctx)  \
{                                          \
    gen_load_locked(ctx, memop);           \
}

/* lbarx, lharx, lwarx */
LARX(lbarx, DEF_MEMOP(MO_UB))
LARX(lharx, DEF_MEMOP(MO_UW))
LARX(lwarx, DEF_MEMOP(MO_UL))
3496
/*
 * Non-parallel path for the bounded fetch-and-increment/decrement forms
 * of lwat/ldat: load the value at EA and the next element at EA+size,
 * store back value+addend only when cond(value, next) holds, and set RT
 * to the old value on success or to a sign-bit sentinel otherwise.
 */
static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop,
                                      TCGv EA, TCGCond cond, int addend)
{
    TCGv t = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv u = tcg_temp_new();

    tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
    tcg_gen_addi_tl(t2, EA, MEMOP_GET_SIZE(memop));
    tcg_gen_qemu_ld_tl(t2, t2, ctx->mem_idx, memop);
    tcg_gen_addi_tl(u, t, addend);

    /* E.g. for fetch and increment bounded... */
    /* mem(EA,s) = (t != t2 ? u = t + 1 : t) */
    tcg_gen_movcond_tl(cond, u, t, t2, u, t);
    tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop);

    /* RT = (t != t2 ? t : u = 1<<(s*8-1)) */
    tcg_gen_movi_tl(u, 1 << (MEMOP_GET_SIZE(memop) * 8 - 1));
    tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u);
}
3518
/*
 * Common code for lwat/ldat: an atomic load-and-modify operation selected
 * by the FC opcode field.  EA comes from rA; the source operand is in
 * RT+1 and the result goes to RT.  FC values with no single TCG atomic
 * equivalent restart the TB under an exclusive lock when running in
 * parallel (gen_helper_exit_atomic).
 */
static void gen_ld_atomic(DisasContext *ctx, MemOp memop)
{
    uint32_t gpr_FC = FC(ctx->opcode);
    TCGv EA = tcg_temp_new();
    int rt = rD(ctx->opcode);
    bool need_serial;
    TCGv src, dst;

    gen_addr_register(ctx, EA);
    dst = cpu_gpr[rt];
    src = cpu_gpr[(rt + 1) & 31];

    need_serial = false;
    memop |= MO_ALIGN;
    switch (gpr_FC) {
    case 0: /* Fetch and add */
        tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 1: /* Fetch and xor */
        tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 2: /* Fetch and or */
        tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 3: /* Fetch and 'and' */
        tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 4: /* Fetch and max unsigned */
        tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 5: /* Fetch and max signed */
        tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 6: /* Fetch and min unsigned */
        tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 7: /* Fetch and min signed */
        tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 8: /* Swap */
        tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop);
        break;

    case 16: /* Compare and swap not equal */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();

            tcg_gen_qemu_ld_tl(t0, EA, ctx->mem_idx, memop);
            /* Zero-extend the comparand for 32-bit ops on 64-bit hosts. */
            if ((memop & MO_SIZE) == MO_64 || TARGET_LONG_BITS == 32) {
                tcg_gen_mov_tl(t1, src);
            } else {
                tcg_gen_ext32u_tl(t1, src);
            }
            tcg_gen_movcond_tl(TCG_COND_NE, t1, t0, t1,
                               cpu_gpr[(rt + 2) & 31], t0);
            tcg_gen_qemu_st_tl(t1, EA, ctx->mem_idx, memop);
            tcg_gen_mov_tl(dst, t0);
        }
        break;

    case 24: /* Fetch and increment bounded */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, 1);
        }
        break;
    case 25: /* Fetch and increment equal */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_EQ, 1);
        }
        break;
    case 28: /* Fetch and decrement bounded */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, -1);
        }
        break;

    default:
        /* invoke data storage error handler */
        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
    }

    if (need_serial) {
        /* Restart with exclusive lock. */
        gen_helper_exit_atomic(cpu_env);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}
3615
/* lwat: 32-bit atomic load-and-modify. */
static void gen_lwat(DisasContext *ctx)
{
    gen_ld_atomic(ctx, DEF_MEMOP(MO_UL));
}

#ifdef TARGET_PPC64
/* ldat: 64-bit atomic load-and-modify. */
static void gen_ldat(DisasContext *ctx)
{
    gen_ld_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
3627
/*
 * Common code for stwat/stdat: an atomic store-and-modify operation
 * selected by the FC opcode field.  EA comes from rA, the source operand
 * from RT; the fetched value is discarded.
 */
static void gen_st_atomic(DisasContext *ctx, MemOp memop)
{
    uint32_t gpr_FC = FC(ctx->opcode);
    TCGv EA = tcg_temp_new();
    TCGv src, discard;

    gen_addr_register(ctx, EA);
    src = cpu_gpr[rD(ctx->opcode)];
    discard = tcg_temp_new();

    memop |= MO_ALIGN;
    switch (gpr_FC) {
    case 0: /* add and Store */
        tcg_gen_atomic_add_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 1: /* xor and Store */
        tcg_gen_atomic_xor_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 2: /* Or and Store */
        tcg_gen_atomic_or_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 3: /* 'and' and Store */
        tcg_gen_atomic_and_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 4: /* Store max unsigned */
        tcg_gen_atomic_umax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 5: /* Store max signed */
        tcg_gen_atomic_smax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 6: /* Store min unsigned */
        tcg_gen_atomic_umin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 7: /* Store min signed */
        tcg_gen_atomic_smin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 24: /* Store twin */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            /* Restart with exclusive lock. */
            gen_helper_exit_atomic(cpu_env);
            ctx->base.is_jmp = DISAS_NORETURN;
        } else {
            TCGv t = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv s = tcg_temp_new();
            TCGv s2 = tcg_temp_new();
            TCGv ea_plus_s = tcg_temp_new();

            /* Store src to both elements only if they are still equal. */
            tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
            tcg_gen_addi_tl(ea_plus_s, EA, MEMOP_GET_SIZE(memop));
            tcg_gen_qemu_ld_tl(t2, ea_plus_s, ctx->mem_idx, memop);
            tcg_gen_movcond_tl(TCG_COND_EQ, s, t, t2, src, t);
            tcg_gen_movcond_tl(TCG_COND_EQ, s2, t, t2, src, t2);
            tcg_gen_qemu_st_tl(s, EA, ctx->mem_idx, memop);
            tcg_gen_qemu_st_tl(s2, ea_plus_s, ctx->mem_idx, memop);
        }
        break;
    default:
        /* invoke data storage error handler */
        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
    }
}
3690
/* stwat: 32-bit atomic store-and-modify. */
static void gen_stwat(DisasContext *ctx)
{
    gen_st_atomic(ctx, DEF_MEMOP(MO_UL));
}

#ifdef TARGET_PPC64
/* stdat: 64-bit atomic store-and-modify. */
static void gen_stdat(DisasContext *ctx)
{
    gen_st_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
3702
/*
 * Common code for store-conditional (stcx): the store succeeds only if
 * EA matches cpu_reserve and memory still holds cpu_reserve_val; CR0 is
 * set from the outcome (EQ on success) merged with XER[SO].  The
 * reservation is always cleared afterwards.
 */
static void gen_conditional_store(DisasContext *ctx, MemOp memop)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    TCGv t0 = tcg_temp_new();
    int reg = rS(ctx->opcode);

    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, t0);
    tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);

    /* Fresh temp for the cmpxchg result; the EA temp dies at the branch. */
    t0 = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val,
                              cpu_gpr[reg], ctx->mem_idx,
                              DEF_MEMOP(memop) | MO_ALIGN);
    tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_reserve_val);
    tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
    tcg_gen_or_tl(t0, t0, cpu_so);
    tcg_gen_trunc_tl_i32(cpu_crf[0], t0)();
    tcg_gen_br(l2);

    gen_set_label(l1);

    /*
     * Address mismatch implies failure. But we still need to provide
     * the memory barrier semantics of the instruction.
     */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);

    gen_set_label(l2);
    tcg_gen_movi_tl(cpu_reserve, -1);
}
3736
/* Expand a store-conditional instruction of the given width. */
#define STCX(name, memop)                  \
static void gen_##name(DisasContext *ctx)  \
{                                          \
    gen_conditional_store(ctx, memop);     \
}

/* stbcx. sthcx. stwcx. */
STCX(stbcx_, DEF_MEMOP(MO_UB))
STCX(sthcx_, DEF_MEMOP(MO_UW))
STCX(stwcx_, DEF_MEMOP(MO_UL))

#if defined(TARGET_PPC64)
/* ldarx */
LARX(ldarx, DEF_MEMOP(MO_UQ))
/* stdcx. */
STCX(stdcx_, DEF_MEMOP(MO_UQ))
3752
/*
 * lqarx - Load Quadword And Reserve Indexed: 128-bit load-reserve into
 * the even/odd pair RD:RD+1.  RD must be even and must not equal rA/rB.
 */
static void gen_lqarx(DisasContext *ctx)
{
    int rd = rD(ctx->opcode);
    TCGv EA, hi, lo;

    if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) ||
                 (rd == rB(ctx->opcode)))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    gen_set_access_type(ctx, ACCESS_RES);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);

    /* Note that the low part is always in RD+1, even in LE mode. */
    lo = cpu_gpr[rd + 1];
    hi = cpu_gpr[rd];

    if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
        if (HAVE_ATOMIC128) {
            /* Atomic 16-byte load via helper; high half returns in retxh. */
            TCGv_i32 oi = tcg_temp_new_i32();
            if (ctx->le_mode) {
                tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128 | MO_ALIGN,
                                                    ctx->mem_idx));
                gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
            } else {
                tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128 | MO_ALIGN,
                                                    ctx->mem_idx));
                gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
            }
            tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
        } else {
            /* Restart with exclusive lock. */
            gen_helper_exit_atomic(cpu_env);
            ctx->base.is_jmp = DISAS_NORETURN;
            return;
        }
    } else if (ctx->le_mode) {
        tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEUQ | MO_ALIGN_16);
        tcg_gen_mov_tl(cpu_reserve, EA);
        gen_addr_add(ctx, EA, EA, 8);
        tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEUQ);
    } else {
        tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEUQ | MO_ALIGN_16);
        tcg_gen_mov_tl(cpu_reserve, EA);
        gen_addr_add(ctx, EA, EA, 8);
        tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEUQ);
    }

    /* Record both halves for the matching stqcx. */
    tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val));
    tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2));
}
3807
/*
 * stqcx. - Store Quadword Conditional Indexed: 128-bit conditional store
 * of the even/odd pair RS:RS+1, succeeding only against a matching lqarx
 * reservation.  CR0 is set from the outcome merged with XER[SO].
 */
static void gen_stqcx_(DisasContext *ctx)
{
    TCGLabel *lab_fail, *lab_over;
    int rs = rS(ctx->opcode);
    TCGv EA, t0, t1;
    TCGv_i128 cmp, val;

    /* RS must be an even register. */
    if (unlikely(rs & 1)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    lab_fail = gen_new_label();
    lab_over = gen_new_label();

    gen_set_access_type(ctx, ACCESS_RES);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);

    tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lab_fail);

    cmp = tcg_temp_new_i128();
    val = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(cmp, cpu_reserve_val2, cpu_reserve_val);

    /* Note that the low part is always in RS+1, even in LE mode. */
    tcg_gen_concat_i64_i128(val, cpu_gpr[rs + 1], cpu_gpr[rs]);

    tcg_gen_atomic_cmpxchg_i128(val, cpu_reserve, cmp, val, ctx->mem_idx,
                                DEF_MEMOP(MO_128 | MO_ALIGN));

    /* Success iff both halves of the old value match the reservation. */
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_extr_i128_i64(t1, t0, val);

    tcg_gen_xor_tl(t1, t1, cpu_reserve_val2);
    tcg_gen_xor_tl(t0, t0, cpu_reserve_val);
    tcg_gen_or_tl(t0, t0, t1);

    tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, 0);
    tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
    tcg_gen_or_tl(t0, t0, cpu_so);
    tcg_gen_trunc_tl_i32(cpu_crf[0], t0);

    tcg_gen_br(lab_over);
    gen_set_label(lab_fail);

    /*
     * Address mismatch implies failure. But we still need to provide
     * the memory barrier semantics of the instruction.
     */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);

    gen_set_label(lab_over);
    tcg_gen_movi_tl(cpu_reserve, -1);
}
#endif /* defined(TARGET_PPC64) */
3867 #endif /* defined(TARGET_PPC64) */
3868
/* sync */
static void gen_sync(DisasContext *ctx)
{
    TCGBar bar = TCG_MO_ALL;
    uint32_t l = (ctx->opcode >> 21) & 3;

    /* L=1 (lwsync) permits a weaker barrier that omits store-load order. */
    if ((l == 1) && (ctx->insns_flags2 & PPC2_MEM_LWSYNC)) {
        bar = TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST;
    }

    /*
     * We may need to check for a pending TLB flush.
     *
     * We do this on ptesync (l == 2) on ppc64 and any sync on ppc32.
     *
     * Additionally, this can only happen in kernel mode however so
     * check MSR_PR as well.
     */
    if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) {
        gen_check_tlb_flush(ctx, true);
    }

    tcg_gen_mb(bar | TCG_BAR_SC);
}
3893
3894 /* wait */
3895 static void gen_wait(DisasContext *ctx)
3896 {
3897 uint32_t wc;
3898
3899 if (ctx->insns_flags & PPC_WAIT) {
3900 /* v2.03-v2.07 define an older incompatible 'wait' encoding. */
3901
3902 if (ctx->insns_flags2 & PPC2_PM_ISA206) {
3903 /* v2.06 introduced the WC field. WC > 0 may be treated as no-op. */
3904 wc = WC(ctx->opcode);
3905 } else {
3906 wc = 0;
3907 }
3908
3909 } else if (ctx->insns_flags2 & PPC2_ISA300) {
3910 /* v3.0 defines a new 'wait' encoding. */
3911 wc = WC(ctx->opcode);
3912 if (ctx->insns_flags2 & PPC2_ISA310) {
3913 uint32_t pl = PL(ctx->opcode);
3914
3915 /* WC 1,2 may be treated as no-op. WC 3 is reserved. */
3916 if (wc == 3) {
3917 gen_invalid(ctx);
3918 return;
3919 }
3920
3921 /* PL 1-3 are reserved. If WC=2 then the insn is treated as noop. */
3922 if (pl > 0 && wc != 2) {
3923 gen_invalid(ctx);
3924 return;
3925 }
3926
3927 } else { /* ISA300 */
3928 /* WC 1-3 are reserved */
3929 if (wc > 0) {
3930 gen_invalid(ctx);
3931 return;
3932 }
3933 }
3934
3935 } else {
3936 warn_report("wait instruction decoded with wrong ISA flags.");
3937 gen_invalid(ctx);
3938 return;
3939 }
3940
3941 /*
3942 * wait without WC field or with WC=0 waits for an exception / interrupt
3943 * to occur.
3944 */
3945 if (wc == 0) {
3946 TCGv_i32 t0 = tcg_const_i32(1);
3947 tcg_gen_st_i32(t0, cpu_env,
3948 -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
3949 /* Stop translation, as the CPU is supposed to sleep from now */
3950 gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
3951 }
3952
3953 /*
3954 * Other wait types must not just wait until an exception occurs because
3955 * ignoring their other wake-up conditions could cause a hang.
3956 *
3957 * For v2.06 and 2.07, wc=1,2,3 are architected but may be implemented as
3958 * no-ops.
3959 *
3960 * wc=1 and wc=3 explicitly allow the instruction to be treated as a no-op.
3961 *
3962 * wc=2 waits for an implementation-specific condition, such could be
3963 * always true, so it can be implemented as a no-op.
3964 *
3965 * For v3.1, wc=1,2 are architected but may be implemented as no-ops.
3966 *
3967 * wc=1 (waitrsv) waits for an exception or a reservation to be lost.
3968 * Reservation-loss may have implementation-specific conditions, so it
3969 * can be implemented as a no-op.
3970 *
3971 * wc=2 waits for an exception or an amount of time to pass. This
3972 * amount is implementation-specific so it can be implemented as a
3973 * no-op.
3974 *
3975 * ISA v3.1 allows for execution to resume "in the rare case of
3976 * an implementation-dependent event", so in any case software must
3977 * not depend on the architected resumption condition to become
3978 * true, so no-op implementations should be architecturally correct
3979 * (if suboptimal).
3980 */
3981 }
3982
3983 #if defined(TARGET_PPC64)
3984 static void gen_doze(DisasContext *ctx)
3985 {
3986 #if defined(CONFIG_USER_ONLY)
3987 GEN_PRIV(ctx);
3988 #else
3989 TCGv_i32 t;
3990
3991 CHK_HV(ctx);
3992 t = tcg_const_i32(PPC_PM_DOZE);
3993 gen_helper_pminsn(cpu_env, t);
3994 /* Stop translation, as the CPU is supposed to sleep from now */
3995 gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
3996 #endif /* defined(CONFIG_USER_ONLY) */
3997 }
3998
3999 static void gen_nap(DisasContext *ctx)
4000 {
4001 #if defined(CONFIG_USER_ONLY)
4002 GEN_PRIV(ctx);
4003 #else
4004 TCGv_i32 t;
4005
4006 CHK_HV(ctx);
4007 t = tcg_const_i32(PPC_PM_NAP);
4008 gen_helper_pminsn(cpu_env, t);
4009 /* Stop translation, as the CPU is supposed to sleep from now */
4010 gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
4011 #endif /* defined(CONFIG_USER_ONLY) */
4012 }
4013
4014 static void gen_stop(DisasContext *ctx)
4015 {
4016 #if defined(CONFIG_USER_ONLY)
4017 GEN_PRIV(ctx);
4018 #else
4019 TCGv_i32 t;
4020
4021 CHK_HV(ctx);
4022 t = tcg_const_i32(PPC_PM_STOP);
4023 gen_helper_pminsn(cpu_env, t);
4024 /* Stop translation, as the CPU is supposed to sleep from now */
4025 gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
4026 #endif /* defined(CONFIG_USER_ONLY) */
4027 }
4028
4029 static void gen_sleep(DisasContext *ctx)
4030 {
4031 #if defined(CONFIG_USER_ONLY)
4032 GEN_PRIV(ctx);
4033 #else
4034 TCGv_i32 t;
4035
4036 CHK_HV(ctx);
4037 t = tcg_const_i32(PPC_PM_SLEEP);
4038 gen_helper_pminsn(cpu_env, t);
4039 /* Stop translation, as the CPU is supposed to sleep from now */
4040 gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
4041 #endif /* defined(CONFIG_USER_ONLY) */
4042 }
4043
4044 static void gen_rvwinkle(DisasContext *ctx)
4045 {
4046 #if defined(CONFIG_USER_ONLY)
4047 GEN_PRIV(ctx);
4048 #else
4049 TCGv_i32 t;
4050
4051 CHK_HV(ctx);
4052 t = tcg_const_i32(PPC_PM_RVWINKLE);
4053 gen_helper_pminsn(cpu_env, t);
4054 /* Stop translation, as the CPU is supposed to sleep from now */
4055 gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
4056 #endif /* defined(CONFIG_USER_ONLY) */
4057 }
4058 #endif /* #if defined(TARGET_PPC64) */
4059
/* Record nip in the CFAR (Come-From Address Register) when the CPU has one. */
static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip)
{
#if defined(TARGET_PPC64)
    if (ctx->has_cfar) {
        tcg_gen_movi_tl(cpu_cfar, nip);
    }
#endif
}
4068
#if defined(TARGET_PPC64)
/*
 * Add the number of instructions in this TB to the PMU instruction
 * counters.  Must be emitted before any code that can leave the TB.
 */
static void pmu_count_insns(DisasContext *ctx)
{
    /*
     * Do not bother calling the helper if the PMU isn't counting
     * instructions.
     */
    if (!ctx->pmu_insn_cnt) {
        return;
    }

 #if !defined(CONFIG_USER_ONLY)
    TCGLabel *l;
    TCGv t0;

    /*
     * The PMU insns_inc() helper stops the internal PMU timer if a
     * counter overflows happens. In that case, if the guest is
     * running with icount and we do not handle it beforehand,
     * the helper can trigger a 'bad icount read'.
     */
    gen_icount_io_start(ctx);

    /* Avoid helper calls when only PMC5-6 are enabled. */
    if (!ctx->pmc_other) {
        l = gen_new_label();
        t0 = tcg_temp_new();

        gen_load_spr(t0, SPR_POWER_PMC5);
        tcg_gen_addi_tl(t0, t0, ctx->base.num_insns);
        gen_store_spr(SPR_POWER_PMC5, t0);
        /* Check for overflow, if it's enabled */
        if (ctx->mmcr0_pmcjce) {
            tcg_gen_brcondi_tl(TCG_COND_LT, t0, PMC_COUNTER_NEGATIVE_VAL, l);
            gen_helper_handle_pmc5_overflow(cpu_env);
        }

        gen_set_label(l);
    } else {
        gen_helper_insns_inc(cpu_env, tcg_constant_i32(ctx->base.num_insns));
    }
 #else
    /*
     * User mode can read (but not write) PMC5 and start/stop
     * the PMU via MMCR0_FC. In this case just increment
     * PMC5 with base.num_insns.
     */
    TCGv t0 = tcg_temp_new();

    gen_load_spr(t0, SPR_POWER_PMC5);
    tcg_gen_addi_tl(t0, t0, ctx->base.num_insns);
    gen_store_spr(SPR_POWER_PMC5, t0);
 #endif /* #if !defined(CONFIG_USER_ONLY) */
}
#else
/* No PMU instruction counting on 32-bit targets. */
static void pmu_count_insns(DisasContext *ctx)
{
    return;
}
#endif /* #if defined(TARGET_PPC64) */
4129
/* True when a direct TB-to-TB link to dest is permitted. */
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}
4134
4135 static void gen_lookup_and_goto_ptr(DisasContext *ctx)
4136 {
4137 if (unlikely(ctx->singlestep_enabled)) {
4138 gen_debug_exception(ctx);
4139 } else {
4140 /*
4141 * tcg_gen_lookup_and_goto_ptr will exit the TB if
4142 * CF_NO_GOTO_PTR is set. Count insns now.
4143 */
4144 if (ctx->base.tb->flags & CF_NO_GOTO_PTR) {
4145 pmu_count_insns(ctx);
4146 }
4147
4148 tcg_gen_lookup_and_goto_ptr();
4149 }
4150 }
4151
4152 /*** Branch ***/
/* Jump to dest, using a direct TB link when permitted. */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    /* 32-bit mode: addresses wrap at 4 GiB. */
    if (NARROW_MODE(ctx)) {
        dest = (uint32_t) dest;
    }
    if (use_goto_tb(ctx, dest)) {
        pmu_count_insns(ctx);
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_nip, dest & ~3);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_tl(cpu_nip, dest & ~3);
        gen_lookup_and_goto_ptr(ctx);
    }
}
4168
4169 static inline void gen_setlr(DisasContext *ctx, target_ulong nip)
4170 {
4171 if (NARROW_MODE(ctx)) {
4172 nip = (uint32_t)nip;
4173 }
4174 tcg_gen_movi_tl(cpu_lr, nip);
4175 }
4176
4177 /* b ba bl bla */
4178 static void gen_b(DisasContext *ctx)
4179 {
4180 target_ulong li, target;
4181
4182 /* sign extend LI */
4183 li = LI(ctx->opcode);
4184 li = (li ^ 0x02000000) - 0x02000000;
4185 if (likely(AA(ctx->opcode) == 0)) {
4186 target = ctx->cia + li;
4187 } else {
4188 target = li;
4189 }
4190 if (LK(ctx->opcode)) {
4191 gen_setlr(ctx, ctx->base.pc_next);
4192 }
4193 gen_update_cfar(ctx, ctx->cia);
4194 gen_goto_tb(ctx, 0, target);
4195 ctx->base.is_jmp = DISAS_NORETURN;
4196 }
4197
/* Branch-target kinds for gen_bcond: immediate, LR, CTR or TAR. */
#define BCOND_IM 0
#define BCOND_LR 1
#define BCOND_CTR 2
#define BCOND_TAR 3

/*
 * Common code for the conditional branches bc/bclr/bcctr/bctar:
 * optionally decrement-and-test CTR (BO bit 2 clear), optionally test a
 * CR bit (BO bit 4 clear), then branch to the selected target, with a
 * fallthrough TB when the branch is conditional.
 */
static void gen_bcond(DisasContext *ctx, int type)
{
    uint32_t bo = BO(ctx->opcode);
    TCGLabel *l1;
    TCGv target;

    /* Capture the register-based target before LR is clobbered below. */
    if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
        target = tcg_temp_new();
        if (type == BCOND_CTR) {
            tcg_gen_mov_tl(target, cpu_ctr);
        } else if (type == BCOND_TAR) {
            gen_load_spr(target, SPR_TAR);
        } else {
            tcg_gen_mov_tl(target, cpu_lr);
        }
    } else {
        target = NULL;
    }
    if (LK(ctx->opcode)) {
        gen_setlr(ctx, ctx->base.pc_next);
    }
    l1 = gen_new_label();
    if ((bo & 0x4) == 0) {
        /* Decrement and test CTR */
        TCGv temp = tcg_temp_new();

        if (type == BCOND_CTR) {
            /*
             * All ISAs up to v3 describe this form of bcctr as invalid but
             * some processors, ie. 64-bit server processors compliant with
             * arch 2.x, do implement a "test and decrement" logic instead,
             * as described in their respective UMs. This logic involves CTR
             * to act as both the branch target and a counter, which makes
             * it basically useless and thus never used in real code.
             *
             * This form was hence chosen to trigger extra micro-architectural
             * side-effect on real HW needed for the Spectre v2 workaround.
             * It is up to guests that implement such workaround, ie. linux, to
             * use this form in a way it just triggers the side-effect without
             * doing anything else harmful.
             */
            if (unlikely(!is_book3s_arch2x(ctx))) {
                gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
                return;
            }

            /* bcctr: test the pre-decrement value, then decrement. */
            if (NARROW_MODE(ctx)) {
                tcg_gen_ext32u_tl(temp, cpu_ctr);
            } else {
                tcg_gen_mov_tl(temp, cpu_ctr);
            }
            if (bo & 0x2) {
                tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
            } else {
                tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
            }
            tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
        } else {
            /* Normal form: decrement CTR first, then test it. */
            tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
            if (NARROW_MODE(ctx)) {
                tcg_gen_ext32u_tl(temp, cpu_ctr);
            } else {
                tcg_gen_mov_tl(temp, cpu_ctr);
            }
            if (bo & 0x2) {
                tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
            } else {
                tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
            }
        }
    }
    if ((bo & 0x10) == 0) {
        /* Test CR */
        uint32_t bi = BI(ctx->opcode);
        uint32_t mask = 0x08 >> (bi & 0x03);
        TCGv_i32 temp = tcg_temp_new_i32();

        if (bo & 0x8) {
            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
            tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1);
        } else {
            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
            tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1);
        }
    }
    gen_update_cfar(ctx, ctx->cia);
    if (type == BCOND_IM) {
        /* Immediate target: sign-extended BD field, relative or absolute. */
        target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
        if (likely(AA(ctx->opcode) == 0)) {
            gen_goto_tb(ctx, 0, ctx->cia + li);
        } else {
            gen_goto_tb(ctx, 0, li);
        }
    } else {
        /* Register target: mask the low bits, then indirect jump. */
        if (NARROW_MODE(ctx)) {
            tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
        } else {
            tcg_gen_andi_tl(cpu_nip, target, ~3);
        }
        gen_lookup_and_goto_ptr(ctx);
    }
    if ((bo & 0x14) != 0x14) {
        /* fallthrough case */
        gen_set_label(l1);
        gen_goto_tb(ctx, 1, ctx->base.pc_next);
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}
4311
/* bc - branch conditional to immediate displacement. */
static void gen_bc(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_IM);
}

/* bcctr - branch conditional to CTR. */
static void gen_bcctr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_CTR);
}

/* bclr - branch conditional to LR. */
static void gen_bclr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_LR);
}

/* bctar - branch conditional to TAR. */
static void gen_bctar(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_TAR);
}
4331
4332 /*** Condition register logical ***/
/*
 * Expand a CR-field logical instruction: align the crbA and crbB bits to
 * the crbD position within their 4-bit CR fields, apply tcg_op, then
 * merge the single result bit back into the destination CR field.
 */
#define GEN_CRLOGIC(name, tcg_op, opc)                                        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    uint8_t bitmask;                                                          \
    int sh;                                                                   \
    TCGv_i32 t0, t1;                                                          \
    sh = (crbD(ctx->opcode) & 0x03) - (crbA(ctx->opcode) & 0x03);             \
    t0 = tcg_temp_new_i32();                                                  \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2]);                 \
    t1 = tcg_temp_new_i32();                                                  \
    sh = (crbD(ctx->opcode) & 0x03) - (crbB(ctx->opcode) & 0x03);             \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2]);                 \
    tcg_op(t0, t0, t1);                                                       \
    bitmask = 0x08 >> (crbD(ctx->opcode) & 0x03);                             \
    tcg_gen_andi_i32(t0, t0, bitmask);                                        \
    tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask);          \
    tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1);                  \
}

/* crand */
GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08);
/* crandc */
GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04);
/* creqv */
GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09);
/* crnand */
GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07);
/* crnor */
GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01);
/* cror */
GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E);
/* crorc */
GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D);
/* crxor */
GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06);
4378
/* mcrf - copy condition register field crfS into crfD */
static void gen_mcrf(DisasContext *ctx)
{
    tcg_gen_mov_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfS(ctx->opcode)]);
}
4384
4385 /*** System linkage ***/
4386
/* rfi (supervisor only) - return from interrupt; state restored by helper */
static void gen_rfi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /*
     * This instruction doesn't exist anymore on 64-bit server
     * processors compliant with arch 2.x
     */
    if (is_book3s_arch2x(ctx)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    /* Restore CPU state */
    CHK_SV(ctx);
    gen_icount_io_start(ctx);
    gen_update_cfar(ctx, ctx->cia);
    gen_helper_rfi(cpu_env);
    /* MSR may have changed: stop translation and re-evaluate CPU state */
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4409
4410 #if defined(TARGET_PPC64)
/* rfid (supervisor only) - 64-bit return from interrupt */
static void gen_rfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_SV(ctx);
    gen_icount_io_start(ctx);
    gen_update_cfar(ctx, ctx->cia);
    gen_helper_rfid(cpu_env);
    /* MSR may have changed: stop translation and re-evaluate CPU state */
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4424
4425 #if !defined(CONFIG_USER_ONLY)
/* rfscv (supervisor only) - return from system call vectored */
static void gen_rfscv(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_SV(ctx);
    gen_icount_io_start(ctx);
    gen_update_cfar(ctx, ctx->cia);
    gen_helper_rfscv(cpu_env);
    /* MSR may have changed: stop translation and re-evaluate CPU state */
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4439 #endif
4440
/* hrfid (hypervisor only) - return from hypervisor interrupt */
static void gen_hrfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_HV(ctx);
    gen_helper_hrfid(cpu_env);
    /* MSR may have changed: stop translation and re-evaluate CPU state */
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4452 #endif
4453
/* sc - system call */
#if defined(CONFIG_USER_ONLY)
#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL_USER
#else
#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL
#define POWERPC_SYSCALL_VECTORED POWERPC_EXCP_SYSCALL_VECTORED
#endif
static void gen_sc(DisasContext *ctx)
{
    uint32_t lev;

    /* LEV field is taken from bits 11:5 of the instruction word */
    lev = (ctx->opcode >> 5) & 0x7F;
    gen_exception_err(ctx, POWERPC_SYSCALL, lev);
}
4468
4469 #if defined(TARGET_PPC64)
4470 #if !defined(CONFIG_USER_ONLY)
/* scv - system call vectored; LEV selects the vector */
static void gen_scv(DisasContext *ctx)
{
    uint32_t lev = (ctx->opcode >> 5) & 0x7F;

    /* Set the PC back to the faulting instruction. */
    gen_update_nip(ctx, ctx->cia);
    gen_helper_scv(cpu_env, tcg_constant_i32(lev));

    ctx->base.is_jmp = DISAS_NORETURN;
}
4481 #endif
4482 #endif
4483
4484 /*** Trap ***/
4485
4486 /* Check for unconditional traps (always or never) */
4487 static bool check_unconditional_trap(DisasContext *ctx)
4488 {
4489 /* Trap never */
4490 if (TO(ctx->opcode) == 0) {
4491 return true;
4492 }
4493 /* Trap always */
4494 if (TO(ctx->opcode) == 31) {
4495 gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
4496 return true;
4497 }
4498 return false;
4499 }
4500
/* tw - trap word: compare rA with rB under the TO condition mask */
static void gen_tw(DisasContext *ctx)
{
    TCGv_i32 t0;

    if (check_unconditional_trap(ctx)) {
        return;
    }
    t0 = tcg_const_i32(TO(ctx->opcode));
    gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                  t0);
}
4513
/* twi - trap word immediate: compare rA with sign-extended SIMM */
static void gen_twi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1;

    if (check_unconditional_trap(ctx)) {
        return;
    }
    t0 = tcg_const_tl(SIMM(ctx->opcode));
    t1 = tcg_const_i32(TO(ctx->opcode));
    gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
}
4527
4528 #if defined(TARGET_PPC64)
/* td - trap doubleword: compare rA with rB under the TO condition mask */
static void gen_td(DisasContext *ctx)
{
    TCGv_i32 t0;

    if (check_unconditional_trap(ctx)) {
        return;
    }
    t0 = tcg_const_i32(TO(ctx->opcode));
    gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                  t0);
}
4541
/* tdi - trap doubleword immediate: compare rA with sign-extended SIMM */
static void gen_tdi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1;

    if (check_unconditional_trap(ctx)) {
        return;
    }
    t0 = tcg_const_tl(SIMM(ctx->opcode));
    t1 = tcg_const_i32(TO(ctx->opcode));
    gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
}
4555 #endif
4556
4557 /*** Processor control ***/
4558
/*
 * mcrxr - move XER[SO, OV, CA] into CR field crfD (bits 3, 2, 1)
 * and clear those XER bits.  Note dst aliases a live CR field, so
 * SO/OV must be read into temporaries before dst is overwritten.
 */
static void gen_mcrxr(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(t0, cpu_so);
    tcg_gen_trunc_tl_i32(t1, cpu_ov);
    tcg_gen_trunc_tl_i32(dst, cpu_ca);
    tcg_gen_shli_i32(t0, t0, 3);
    tcg_gen_shli_i32(t1, t1, 2);
    tcg_gen_shli_i32(dst, dst, 1);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_or_i32(dst, dst, t1);

    /* The moved bits are cleared in XER */
    tcg_gen_movi_tl(cpu_so, 0);
    tcg_gen_movi_tl(cpu_ov, 0);
    tcg_gen_movi_tl(cpu_ca, 0);
}
4579
4580 #ifdef TARGET_PPC64
/* mcrxrx - move XER[OV, OV32, CA, CA32] into CR field crfD (XER unchanged) */
static void gen_mcrxrx(DisasContext *ctx)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    /* copy OV and OV32 */
    tcg_gen_shli_tl(t0, cpu_ov, 1);
    tcg_gen_or_tl(t0, t0, cpu_ov32);
    tcg_gen_shli_tl(t0, t0, 2);
    /* copy CA and CA32 */
    tcg_gen_shli_tl(t1, cpu_ca, 1);
    tcg_gen_or_tl(t1, t1, cpu_ca32);
    tcg_gen_or_tl(t0, t0, t1);
    tcg_gen_trunc_tl_i32(dst, t0);
}
4598 #endif
4599
4600 /* mfcr mfocrf */
4601 static void gen_mfcr(DisasContext *ctx)
4602 {
4603 uint32_t crm, crn;
4604
4605 if (likely(ctx->opcode & 0x00100000)) {
4606 crm = CRM(ctx->opcode);
4607 if (likely(crm && ((crm & (crm - 1)) == 0))) {
4608 crn = ctz32(crm);
4609 tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]);
4610 tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)],
4611 cpu_gpr[rD(ctx->opcode)], crn * 4);
4612 }
4613 } else {
4614 TCGv_i32 t0 = tcg_temp_new_i32();
4615 tcg_gen_mov_i32(t0, cpu_crf[0]);
4616 tcg_gen_shli_i32(t0, t0, 4);
4617 tcg_gen_or_i32(t0, t0, cpu_crf[1]);
4618 tcg_gen_shli_i32(t0, t0, 4);
4619 tcg_gen_or_i32(t0, t0, cpu_crf[2]);
4620 tcg_gen_shli_i32(t0, t0, 4);
4621 tcg_gen_or_i32(t0, t0, cpu_crf[3]);
4622 tcg_gen_shli_i32(t0, t0, 4);
4623 tcg_gen_or_i32(t0, t0, cpu_crf[4]);
4624 tcg_gen_shli_i32(t0, t0, 4);
4625 tcg_gen_or_i32(t0, t0, cpu_crf[5]);
4626 tcg_gen_shli_i32(t0, t0, 4);
4627 tcg_gen_or_i32(t0, t0, cpu_crf[6]);
4628 tcg_gen_shli_i32(t0, t0, 4);
4629 tcg_gen_or_i32(t0, t0, cpu_crf[7]);
4630 tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
4631 }
4632 }
4633
/* mfmsr (supervisor only) - copy the MSR into rD */
static void gen_mfmsr(DisasContext *ctx)
{
    CHK_SV(ctx);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr);
}
4640
/*
 * mfspr - common body for mfspr/mftb: look up the per-SPR read callback
 * for the current privilege level and invoke it, or raise the exception
 * (or no-op) mandated for inaccessible/undefined SPRs.
 */
static inline void gen_op_mfspr(DisasContext *ctx)
{
    void (*read_cb)(DisasContext *ctx, int gprn, int sprn);
    uint32_t sprn = SPR(ctx->opcode);

    /* Select the callback table for the current privilege level */
#if defined(CONFIG_USER_ONLY)
    read_cb = ctx->spr_cb[sprn].uea_read;
#else
    if (ctx->pr) {
        read_cb = ctx->spr_cb[sprn].uea_read;
    } else if (ctx->hv) {
        read_cb = ctx->spr_cb[sprn].hea_read;
    } else {
        read_cb = ctx->spr_cb[sprn].oea_read;
    }
#endif
    if (likely(read_cb != NULL)) {
        if (likely(read_cb != SPR_NOACCESS)) {
            (*read_cb)(ctx, rD(ctx->opcode), sprn);
        } else {
            /* Privilege exception */
            /*
             * This is a hack to avoid warnings when running Linux:
             * this OS breaks the PowerPC virtualisation model,
             * allowing userland application to read the PVR
             */
            if (sprn != SPR_PVR) {
                qemu_log_mask(LOG_GUEST_ERROR, "Trying to read privileged spr "
                              "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                              ctx->cia);
            }
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }
        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to read invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);

        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        } else {
            if (ctx->pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        }
    }
}
4702
/* mfspr - move from special purpose register */
static void gen_mfspr(DisasContext *ctx)
{
    gen_op_mfspr(ctx);
}
4707
/* mftb - move from time base; shares the mfspr dispatch path */
static void gen_mftb(DisasContext *ctx)
{
    gen_op_mfspr(ctx);
}
4713
/*
 * mtcrf / mtocrf - move rS into the condition register.  mtocrf (bit
 * 0x00100000 set) writes the single CR field selected by a one-hot CRM
 * mask; mtcrf writes every field whose CRM bit is set.
 */
static void gen_mtcrf(DisasContext *ctx)
{
    uint32_t crm, crn;

    crm = CRM(ctx->opcode);
    if (likely((ctx->opcode & 0x00100000))) {
        if (crm && ((crm & (crm - 1)) == 0)) {
            TCGv_i32 temp = tcg_temp_new_i32();
            crn = ctz32(crm);
            tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
            tcg_gen_shri_i32(temp, temp, crn * 4);
            tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf);
        }
    } else {
        TCGv_i32 temp = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
        for (crn = 0 ; crn < 8 ; crn++) {
            if (crm & (1 << crn)) {
                tcg_gen_shri_i32(cpu_crf[7 - crn], temp, crn * 4);
                tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf);
            }
        }
    }
}
4739
4740 /* mtmsr */
4741 #if defined(TARGET_PPC64)
/* mtmsrd (supervisor only) - 64-bit move to MSR; book3s arch 2.x only */
static void gen_mtmsrd(DisasContext *ctx)
{
    if (unlikely(!is_book3s_arch2x(ctx))) {
        gen_invalid(ctx);
        return;
    }

    CHK_SV(ctx);

#if !defined(CONFIG_USER_ONLY)
    TCGv t0, t1;
    target_ulong mask;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    gen_icount_io_start(ctx);

    if (ctx->opcode & 0x00010000) {
        /* L=1 form only updates EE and RI */
        mask = (1ULL << MSR_RI) | (1ULL << MSR_EE);
    } else {
        /* mtmsrd does not alter HV, S, ME, or LE */
        mask = ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S) |
                 (1ULL << MSR_HV));
        /*
         * XXX: we need to update nip before the store if we enter
         * power saving mode, we will exit the loop directly from
         * ppc_store_msr
         */
        gen_update_nip(ctx, ctx->base.pc_next);
    }

    /* New MSR = (rS & mask) | (old MSR & ~mask) */
    tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_andi_tl(t1, cpu_msr, ~mask);
    tcg_gen_or_tl(t0, t0, t1);

    gen_helper_store_msr(cpu_env, t0);

    /* Must stop the translation as machine state (may have) changed */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif /* !defined(CONFIG_USER_ONLY) */
}
4785 #endif /* defined(TARGET_PPC64) */
4786
/* mtmsr (supervisor only) - 32-bit move to MSR */
static void gen_mtmsr(DisasContext *ctx)
{
    CHK_SV(ctx);

#if !defined(CONFIG_USER_ONLY)
    TCGv t0, t1;
    /* Only the low 32 MSR bits are writable by this instruction */
    target_ulong mask = 0xFFFFFFFF;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    gen_icount_io_start(ctx);
    if (ctx->opcode & 0x00010000) {
        /* L=1 form only updates EE and RI */
        mask &= (1ULL << MSR_RI) | (1ULL << MSR_EE);
    } else {
        /* mtmsr does not alter S, ME, or LE */
        mask &= ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S));

        /*
         * XXX: we need to update nip before the store if we enter
         * power saving mode, we will exit the loop directly from
         * ppc_store_msr
         */
        gen_update_nip(ctx, ctx->base.pc_next);
    }

    /* New MSR = (rS & mask) | (old MSR & ~mask) */
    tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_andi_tl(t1, cpu_msr, ~mask);
    tcg_gen_or_tl(t0, t0, t1);

    gen_helper_store_msr(cpu_env, t0);

    /* Must stop the translation as machine state (may have) changed */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif
}
4824
/*
 * mtspr - move to special purpose register: look up the per-SPR write
 * callback for the current privilege level and invoke it, or raise the
 * exception (or no-op) mandated for inaccessible/undefined SPRs.
 */
static void gen_mtspr(DisasContext *ctx)
{
    void (*write_cb)(DisasContext *ctx, int sprn, int gprn);
    uint32_t sprn = SPR(ctx->opcode);

    /* Select the callback table for the current privilege level */
#if defined(CONFIG_USER_ONLY)
    write_cb = ctx->spr_cb[sprn].uea_write;
#else
    if (ctx->pr) {
        write_cb = ctx->spr_cb[sprn].uea_write;
    } else if (ctx->hv) {
        write_cb = ctx->spr_cb[sprn].hea_write;
    } else {
        write_cb = ctx->spr_cb[sprn].oea_write;
    }
#endif
    if (likely(write_cb != NULL)) {
        if (likely(write_cb != SPR_NOACCESS)) {
            (*write_cb)(ctx, sprn, rS(ctx->opcode));
        } else {
            /* Privilege exception */
            qemu_log_mask(LOG_GUEST_ERROR, "Trying to write privileged spr "
                          "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                          ctx->cia);
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }

        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to write invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);


        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        } else {
            if (ctx->pr || sprn == 0) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        }
    }
}
4881
4882 #if defined(TARGET_PPC64)
/*
 * setb - set rD from CR field crfS: -1 if LT (bit 3) is set,
 * 1 if GT (bit 2) is set, 0 otherwise.  crf >= 8 means LT set;
 * crf >= 4 means LT or GT set.
 */
static void gen_setb(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t8 = tcg_constant_i32(8);
    TCGv_i32 tm1 = tcg_constant_i32(-1);
    int crf = crfS(ctx->opcode);

    tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4);
    tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0);
    tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
}
4895 #endif
4896
4897 /*** Cache management ***/
4898
4899 /* dcbf */
4900 static void gen_dcbf(DisasContext *ctx)
4901 {
4902 /* XXX: specification says this is treated as a load by the MMU */
4903 TCGv t0;
4904 gen_set_access_type(ctx, ACCESS_CACHE);
4905 t0 = tcg_temp_new();
4906 gen_addr_reg_index(ctx, t0);
4907 gen_qemu_ld8u(ctx, t0, t0);
4908 }
4909
/* dcbfep (external PID dcbf) - modelled as a byte load, value discarded */
static void gen_dcbfep(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    CHK_SV(ctx);
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
}
4921
/* dcbi (Supervisor only) - data cache block invalidate */
static void gen_dcbi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv EA, val;

    CHK_SV(ctx);
    EA = tcg_temp_new();
    gen_set_access_type(ctx, ACCESS_CACHE);
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new();
    /* XXX: specification says this should be treated as a store by the MMU */
    gen_qemu_ld8u(ctx, val, EA);
    gen_qemu_st8(ctx, val, EA);
#endif /* defined(CONFIG_USER_ONLY) */
}
4940
/* dcbst - data cache block store; modelled as a byte load, value discarded */
static void gen_dcbst(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_qemu_ld8u(ctx, t0, t0);
}
4951
/* dcbstep (dcbst External PID version) - modelled as a byte load */
static void gen_dcbstep(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
}
4962
/* dcbt - data cache block touch (prefetch hint) */
static void gen_dcbt(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}
4972
/* dcbtep - external PID data cache block touch (prefetch hint) */
static void gen_dcbtep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}
4982
/* dcbtst - data cache block touch for store (prefetch hint) */
static void gen_dcbtst(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}
4992
/* dcbtstep - external PID data cache block touch for store (prefetch hint) */
static void gen_dcbtstep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}
5002
/* dcbtls - data cache block touch and lock set */
static void gen_dcbtls(DisasContext *ctx)
{
    /* Always fails locking the cache: report it via L1CSR0[CUL] */
    TCGv t0 = tcg_temp_new();
    gen_load_spr(t0, SPR_Exxx_L1CSR0);
    tcg_gen_ori_tl(t0, t0, L1CSR0_CUL);
    gen_store_spr(SPR_Exxx_L1CSR0, t0);
}
5012
/* dcblc - data cache block lock clear */
static void gen_dcblc(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     */
}
5020
/* dcbz - data cache block zero; the helper clears a full cache block */
static void gen_dcbz(DisasContext *ctx)
{
    TCGv tcgv_addr;
    TCGv_i32 tcgv_op;

    gen_set_access_type(ctx, ACCESS_CACHE);
    tcgv_addr = tcg_temp_new();
    /* The helper receives the relevant opcode bits to pick the block size */
    tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000);
    gen_addr_reg_index(ctx, tcgv_addr);
    gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op);
}
5033
/* dcbzep - external PID data cache block zero */
static void gen_dcbzep(DisasContext *ctx)
{
    TCGv tcgv_addr;
    TCGv_i32 tcgv_op;

    gen_set_access_type(ctx, ACCESS_CACHE);
    tcgv_addr = tcg_temp_new();
    /* The helper receives the relevant opcode bits to pick the block size */
    tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000);
    gen_addr_reg_index(ctx, tcgv_addr);
    gen_helper_dcbzep(cpu_env, tcgv_addr, tcgv_op);
}
5046
5047 /* dst / dstt */
5048 static void gen_dst(DisasContext *ctx)
5049 {
5050 if (rA(ctx->opcode) == 0) {
5051 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5052 } else {
5053 /* interpreted as no-op */
5054 }
5055 }
5056
5057 /* dstst /dststt */
5058 static void gen_dstst(DisasContext *ctx)
5059 {
5060 if (rA(ctx->opcode) == 0) {
5061 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5062 } else {
5063 /* interpreted as no-op */
5064 }
5065
5066 }
5067
/* dss / dssall - data stream stop hint */
static void gen_dss(DisasContext *ctx)
{
    /* interpreted as no-op */
}
5073
/* icbi - instruction cache block invalidate */
static void gen_icbi(DisasContext *ctx)
{
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_icbi(cpu_env, t0);
}
5083
/* icbiep - external PID instruction cache block invalidate */
static void gen_icbiep(DisasContext *ctx)
{
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_icbiep(cpu_env, t0);
}
5093
/* Optional: */
/* dcba - data cache block allocate */
static void gen_dcba(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a store by the MMU
     * but does not generate any exception
     */
}
5104
5105 /*** Segment register manipulation ***/
5106 /* Supervisor only: */
5107
/* mfsr (supervisor only) - read segment register SR into rD */
static void gen_mfsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_const_tl(SR(ctx->opcode));
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5121
/* mfsrin (supervisor only) - read segment register indexed by rB[28:31] */
static void gen_mfsrin(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* Segment register number is the top 4 bits of rB */
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5136
/* mtsr (supervisor only) - write rS into segment register SR */
static void gen_mtsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_const_tl(SR(ctx->opcode));
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5150
/* mtsrin (supervisor only) - write to segment register indexed by rB[28:31] */
static void gen_mtsrin(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;
    CHK_SV(ctx);

    t0 = tcg_temp_new();
    /* Segment register number is the top 4 bits of rB */
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    /*
     * NOTE(review): rD() is used here where gen_mtsrin_64b uses rS();
     * presumably both macros extract the same instruction field, so the
     * behaviour matches — confirm against the field definitions.
     */
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rD(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5165
5166 #if defined(TARGET_PPC64)
5167 /* Specific implementation for PowerPC 64 "bridge" emulation using SLB */
5168
/* mfsr - 64-bit "bridge" variant: read segment register SR into rD */
static void gen_mfsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_const_tl(SR(ctx->opcode));
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5182
/* mfsrin - 64-bit "bridge" variant: read segment register indexed by rB */
static void gen_mfsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* Segment register number is the top 4 bits of rB */
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5197
/* mtsr - 64-bit "bridge" variant: write rS into segment register SR */
static void gen_mtsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_const_tl(SR(ctx->opcode));
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5211
/* mtsrin - 64-bit "bridge" variant: write to segment register indexed by rB */
static void gen_mtsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* Segment register number is the top 4 bits of rB */
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5226
5227 #endif /* defined(TARGET_PPC64) */
5228
5229 /*** Lookaside buffer management ***/
5230 /* Optional & supervisor only: */
5231
/* tlbia (hypervisor only) - invalidate all TLB entries */
static void gen_tlbia(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_HV(ctx);

    gen_helper_tlbia(cpu_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
5243
/* tlbsync - wait for TLB invalidations to complete */
static void gen_tlbsync(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else

    if (ctx->gtse) {
        CHK_SV(ctx); /* If gtse is set then tlbsync is supervisor privileged */
    } else {
        CHK_HV(ctx); /* Else hypervisor privileged */
    }

    /* BookS does both ptesync and tlbsync make tlbsync a nop for server */
    if (ctx->insns_flags & PPC_BOOKE) {
        gen_check_tlb_flush(ctx, true);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5263
5264 /*** External control ***/
5265 /* Optional: */
5266
/* eciwx - external control in word indexed: aligned 32-bit load into rD */
static void gen_eciwx(DisasContext *ctx)
{
    TCGv t0;
    /* Should check EAR[E] ! */
    gen_set_access_type(ctx, ACCESS_EXT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
                       DEF_MEMOP(MO_UL | MO_ALIGN));
}
5278
/* ecowx - external control out word indexed: aligned 32-bit store from rS */
static void gen_ecowx(DisasContext *ctx)
{
    TCGv t0;
    /* Should check EAR[E] ! */
    gen_set_access_type(ctx, ACCESS_EXT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /* rD() presumably extracts the same field as rS() for this store; verify */
    tcg_gen_qemu_st_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
                       DEF_MEMOP(MO_UL | MO_ALIGN));
}
5290
5291 /* 602 - 603 - G2 TLB management */
5292
/* tlbld (supervisor only) - 6xx: load data TLB entry for address in rB */
static void gen_tlbld_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_6xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5303
/* tlbli (supervisor only) - 6xx: load instruction TLB entry for address in rB */
static void gen_tlbli_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_6xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5314
5315 /* BookE specific instructions */
5316
/* mfapidi - unimplemented: raises an invalid-instruction exception */
/* XXX: not implemented on 440 ? */
static void gen_mfapidi(DisasContext *ctx)
{
    /* XXX: TODO */
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
5323
5324 /* XXX: not implemented on 440 ? */
5325 static void gen_tlbiva(DisasContext *ctx)
5326 {
5327 #if defined(CONFIG_USER_ONLY)
5328 GEN_PRIV(ctx);
5329 #else
5330 TCGv t0;
5331
5332 CHK_SV(ctx);
5333 t0 = tcg_temp_new();
5334 gen_addr_reg_index(ctx, t0);
5335 gen_helper_tlbiva(cpu_env, cpu_gpr[rB(ctx->opcode)]);
5336 #endif /* defined(CONFIG_USER_ONLY) */
5337 }
5338
5339 /* All 405 MAC instructions are translated here */
5340 static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3,
5341 int ra, int rb, int rt, int Rc)
5342 {
5343 TCGv t0, t1;
5344
5345 t0 = tcg_temp_new();
5346 t1 = tcg_temp_new();
5347
5348 switch (opc3 & 0x0D) {
5349 case 0x05:
5350 /* macchw - macchw. - macchwo - macchwo. */
5351 /* macchws - macchws. - macchwso - macchwso. */
5352 /* nmacchw - nmacchw. - nmacchwo - nmacchwo. */
5353 /* nmacchws - nmacchws. - nmacchwso - nmacchwso. */
5354 /* mulchw - mulchw. */
5355 tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
5356 tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
5357 tcg_gen_ext16s_tl(t1, t1);
5358 break;
5359 case 0x04:
5360 /* macchwu - macchwu. - macchwuo - macchwuo. */
5361 /* macchwsu - macchwsu. - macchwsuo - macchwsuo. */
5362 /* mulchwu - mulchwu. */
5363 tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
5364 tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
5365 tcg_gen_ext16u_tl(t1, t1);
5366 break;
5367 case 0x01:
5368 /* machhw - machhw. - machhwo - machhwo. */
5369 /* machhws - machhws. - machhwso - machhwso. */
5370 /* nmachhw - nmachhw. - nmachhwo - nmachhwo. */
5371 /* nmachhws - nmachhws. - nmachhwso - nmachhwso. */
5372 /* mulhhw - mulhhw. */
5373 tcg_gen_sari_tl(t0, cpu_gpr[ra], 16);
5374 tcg_gen_ext16s_tl(t0, t0);
5375 tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
5376 tcg_gen_ext16s_tl(t1, t1);
5377 break;
5378 case 0x00:
5379 /* machhwu - machhwu. - machhwuo - machhwuo. */
5380 /* machhwsu - machhwsu. - machhwsuo - machhwsuo. */
5381 /* mulhhwu - mulhhwu. */
5382 tcg_gen_shri_tl(t0, cpu_gpr[ra], 16);
5383 tcg_gen_ext16u_tl(t0, t0);
5384 tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
5385 tcg_gen_ext16u_tl(t1, t1);
5386 break;
5387 case 0x0D:
5388 /* maclhw - maclhw. - maclhwo - maclhwo. */
5389 /* maclhws - maclhws. - maclhwso - maclhwso. */
5390 /* nmaclhw - nmaclhw. - nmaclhwo - nmaclhwo. */
5391 /* nmaclhws - nmaclhws. - nmaclhwso - nmaclhwso. */
5392 /* mullhw - mullhw. */
5393 tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
5394 tcg_gen_ext16s_tl(t1, cpu_gpr[rb]);
5395 break;
5396 case 0x0C:
5397 /* maclhwu - maclhwu. - maclhwuo - maclhwuo. */
5398 /* maclhwsu - maclhwsu. - maclhwsuo - maclhwsuo. */
5399 /* mullhwu - mullhwu. */
5400 tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
5401 tcg_gen_ext16u_tl(t1, cpu_gpr[rb]);
5402 break;
5403 }
5404 if (opc2 & 0x04) {
5405 /* (n)multiply-and-accumulate (0x0C / 0x0E) */
5406 tcg_gen_mul_tl(t1, t0, t1);
5407 if (opc2 & 0x02) {
5408 /* nmultiply-and-accumulate (0x0E) */
5409 tcg_gen_sub_tl(t0, cpu_gpr[rt], t1);
5410 } else {
5411 /* multiply-and-accumulate (0x0C) */
5412 tcg_gen_add_tl(t0, cpu_gpr[rt], t1);
5413 }
5414
5415 if (opc3 & 0x12) {
5416 /* Check overflow and/or saturate */
5417 TCGLabel *l1 = gen_new_label();
5418
5419 if (opc3 & 0x10) {
5420 /* Start with XER OV disabled, the most likely case */
5421 tcg_gen_movi_tl(cpu_ov, 0);
5422 }
5423 if (opc3 & 0x01) {
5424 /* Signed */
5425 tcg_gen_xor_tl(t1, cpu_gpr[rt], t1);
5426 tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
5427 tcg_gen_xor_tl(t1, cpu_gpr[rt], t0);
5428 tcg_gen_brcondi_tl(TCG_COND_LT, t1, 0, l1);
5429 if (opc3 & 0x02) {
5430 /* Saturate */
5431 tcg_gen_sari_tl(t0, cpu_gpr[rt], 31);
5432 tcg_gen_xori_tl(t0, t0, 0x7fffffff);
5433 }
5434 } else {
5435 /* Unsigned */
5436 tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
5437 if (opc3 & 0x02) {
5438 /* Saturate */
5439 tcg_gen_movi_tl(t0, UINT32_MAX);
5440 }
5441 }
5442 if (opc3 & 0x10) {
5443 /* Check overflow */
5444 tcg_gen_movi_tl(cpu_ov, 1);
5445 tcg_gen_movi_tl(cpu_so, 1);
5446 }
5447 gen_set_label(l1);
5448 tcg_gen_mov_tl(cpu_gpr[rt], t0);
5449 }
5450 } else {
5451 tcg_gen_mul_tl(cpu_gpr[rt], t0, t1);
5452 }
5453 if (unlikely(Rc) != 0) {
5454 /* Update Rc0 */
5455 gen_set_Rc0(ctx, cpu_gpr[rt]);
5456 }
5457 }
5458
/* Generate one thin 405 MAC wrapper that forwards to gen_405_mulladd_insn */
#define GEN_MAC_HANDLER(name, opc2, opc3)                                     \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_405_mulladd_insn(ctx, opc2, opc3, rA(ctx->opcode), rB(ctx->opcode),   \
                         rD(ctx->opcode), Rc(ctx->opcode));                   \
}

/* macchw - macchw. */
GEN_MAC_HANDLER(macchw, 0x0C, 0x05);
/* macchwo - macchwo. */
GEN_MAC_HANDLER(macchwo, 0x0C, 0x15);
/* macchws - macchws. */
GEN_MAC_HANDLER(macchws, 0x0C, 0x07);
/* macchwso - macchwso. */
GEN_MAC_HANDLER(macchwso, 0x0C, 0x17);
/* macchwsu - macchwsu. */
GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06);
/* macchwsuo - macchwsuo. */
GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16);
/* macchwu - macchwu. */
GEN_MAC_HANDLER(macchwu, 0x0C, 0x04);
/* macchwuo - macchwuo. */
GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14);
/* machhw - machhw. */
GEN_MAC_HANDLER(machhw, 0x0C, 0x01);
/* machhwo - machhwo. */
GEN_MAC_HANDLER(machhwo, 0x0C, 0x11);
/* machhws - machhws. */
GEN_MAC_HANDLER(machhws, 0x0C, 0x03);
/* machhwso - machhwso. */
GEN_MAC_HANDLER(machhwso, 0x0C, 0x13);
/* machhwsu - machhwsu. */
GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02);
/* machhwsuo - machhwsuo. */
GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12);
/* machhwu - machhwu. */
GEN_MAC_HANDLER(machhwu, 0x0C, 0x00);
/* machhwuo - machhwuo. */
GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10);
/* maclhw - maclhw. */
GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D);
/* maclhwo - maclhwo. */
GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D);
/* maclhws - maclhws. */
GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F);
/* maclhwso - maclhwso. */
GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F);
/* maclhwu - maclhwu. */
GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C);
/* maclhwuo - maclhwuo. */
GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C);
/* maclhwsu - maclhwsu. */
GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E);
/* maclhwsuo - maclhwsuo. */
GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E);
/* nmacchw - nmacchw. */
GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05);
/* nmacchwo - nmacchwo. */
GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15);
/* nmacchws - nmacchws. */
GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07);
/* nmacchwso - nmacchwso. */
GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17);
/* nmachhw - nmachhw. */
GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01);
/* nmachhwo - nmachhwo. */
GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11);
/* nmachhws - nmachhws. */
GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03);
/* nmachhwso - nmachhwso. */
GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13);
/* nmaclhw - nmaclhw. */
GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D);
/* nmaclhwo - nmaclhwo. */
GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D);
/* nmaclhws - nmaclhws. */
GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F);
/* nmaclhwso - nmaclhwso. */
GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F);

/* mulchw - mulchw. */
GEN_MAC_HANDLER(mulchw, 0x08, 0x05);
/* mulchwu - mulchwu. */
GEN_MAC_HANDLER(mulchwu, 0x08, 0x04);
/* mulhhw - mulhhw. */
GEN_MAC_HANDLER(mulhhw, 0x08, 0x01);
/* mulhhwu - mulhhwu. */
GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00);
/* mullhw - mullhw. */
GEN_MAC_HANDLER(mullhw, 0x08, 0x0D);
/* mullhwu - mullhwu. */
GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C);
5551
5552 /* mfdcr */
5553 static void gen_mfdcr(DisasContext *ctx)
5554 {
5555 #if defined(CONFIG_USER_ONLY)
5556 GEN_PRIV(ctx);
5557 #else
5558 TCGv dcrn;
5559
5560 CHK_SV(ctx);
5561 dcrn = tcg_const_tl(SPR(ctx->opcode));
5562 gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn);
5563 #endif /* defined(CONFIG_USER_ONLY) */
5564 }
5565
5566 /* mtdcr */
5567 static void gen_mtdcr(DisasContext *ctx)
5568 {
5569 #if defined(CONFIG_USER_ONLY)
5570 GEN_PRIV(ctx);
5571 #else
5572 TCGv dcrn;
5573
5574 CHK_SV(ctx);
5575 dcrn = tcg_const_tl(SPR(ctx->opcode));
5576 gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
5577 #endif /* defined(CONFIG_USER_ONLY) */
5578 }
5579
/* mfdcrx - move from DCR, number taken from register rA (supervisor only). */
/* XXX: not implemented on 440 ? */
static void gen_mfdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Indexed form: the DCR number comes from rA instead of the opcode. */
    gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env,
                        cpu_gpr[rA(ctx->opcode)]);
    /* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}
5593
/* mtdcrx - move to DCR, number taken from register rA (supervisor only). */
/* XXX: not implemented on 440 ? */
static void gen_mtdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Indexed form: the DCR number comes from rA instead of the opcode. */
    gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)],
                         cpu_gpr[rS(ctx->opcode)]);
    /* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}
5607
/* dccci - data cache congruence-class invalidate; privileged no-op here. */
static void gen_dccci(DisasContext *ctx)
{
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5614
/* dcread - data cache read (supervisor only). */
static void gen_dcread(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv EA, val;

    CHK_SV(ctx);
    gen_set_access_type(ctx, ACCESS_CACHE);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new();
    /*
     * The loaded value is discarded; the load is emitted for its side
     * effects (MMU access / possible exception), and rD receives the
     * effective address.
     */
    gen_qemu_ld32u(ctx, val, EA);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA);
#endif /* defined(CONFIG_USER_ONLY) */
}
5632
/* icbt - instruction cache block touch (40x form). */
static void gen_icbt_40x(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     * does not generate any exception
     */
}
5642
/* iccci - instruction cache congruence-class invalidate; privileged no-op. */
static void gen_iccci(DisasContext *ctx)
{
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5649
/* icread - instruction cache read; privileged no-op. */
static void gen_icread(DisasContext *ctx)
{
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5656
/* rfci (supervisor only) - return from critical interrupt, 40x variant. */
static void gen_rfci_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_40x_rfci(cpu_env);
    /* The helper changes MSR/PC, so end the TB and re-enter the loop. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5669
/* rfci - return from critical interrupt (BookE, supervisor only). */
static void gen_rfci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfci(cpu_env);
    /* The helper changes MSR/PC, so end the TB and re-enter the loop. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5681
/* BookE specific */

/* rfdi - return from debug interrupt (supervisor only). */
/* XXX: not implemented on 440 ? */
static void gen_rfdi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfdi(cpu_env);
    /* The helper changes MSR/PC, so end the TB and re-enter the loop. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5696
/* rfmci - return from machine-check interrupt (supervisor only). */
/* XXX: not implemented on 440 ? */
static void gen_rfmci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfmci(cpu_env);
    /* The helper changes MSR/PC, so end the TB and re-enter the loop. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5709
/* TLB management - PowerPC 405 implementation */

/* tlbre - read TLB entry; rB selects the HI (0) or LO (1) word. */
static void gen_tlbre_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    switch (rB(ctx->opcode)) {
    case 0:
        gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], cpu_env,
                                cpu_gpr[rA(ctx->opcode)]);
        break;
    case 1:
        gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], cpu_env,
                                cpu_gpr[rA(ctx->opcode)]);
        break;
    default:
        /* Any other word selector is an illegal encoding. */
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5734
/* tlbsx - tlbsx. - search TLB for the EA (rA|0) + rB; rD gets the index. */
static void gen_tlbsx_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
    if (Rc(ctx->opcode)) {
        /*
         * Rc=1: CR0 = SO, with EQ set when the search hit (the helper
         * returns -1 on a miss, which skips the ori below).
         */
        TCGLabel *l1 = gen_new_label();
        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
        tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
        gen_set_label(l1);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5756
/* tlbwe - write TLB entry; rB selects the HI (0) or LO (1) word. */
static void gen_tlbwe_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);

    switch (rB(ctx->opcode)) {
    case 0:
        gen_helper_4xx_tlbwe_hi(cpu_env, cpu_gpr[rA(ctx->opcode)],
                                cpu_gpr[rS(ctx->opcode)]);
        break;
    case 1:
        gen_helper_4xx_tlbwe_lo(cpu_env, cpu_gpr[rA(ctx->opcode)],
                                cpu_gpr[rS(ctx->opcode)]);
        break;
    default:
        /* Any other word selector is an illegal encoding. */
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5780
5781 /* TLB management - PowerPC 440 implementation */
5782
5783 /* tlbre */
5784 static void gen_tlbre_440(DisasContext *ctx)
5785 {
5786 #if defined(CONFIG_USER_ONLY)
5787 GEN_PRIV(ctx);
5788 #else
5789 CHK_SV(ctx);
5790
5791 switch (rB(ctx->opcode)) {
5792 case 0:
5793 case 1:
5794 case 2:
5795 {
5796 TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode));
5797 gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], cpu_env,
5798 t0, cpu_gpr[rA(ctx->opcode)]);
5799 }
5800 break;
5801 default:
5802 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5803 break;
5804 }
5805 #endif /* defined(CONFIG_USER_ONLY) */
5806 }
5807
/* tlbsx - tlbsx. - search TLB for the EA (rA|0) + rB; rD gets the index. */
static void gen_tlbsx_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
    if (Rc(ctx->opcode)) {
        /*
         * Rc=1: CR0 = SO, with EQ set when the search hit (the helper
         * returns -1 on a miss, which skips the ori below).
         */
        TCGLabel *l1 = gen_new_label();
        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
        tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
        gen_set_label(l1);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5829
5830 /* tlbwe */
5831 static void gen_tlbwe_440(DisasContext *ctx)
5832 {
5833 #if defined(CONFIG_USER_ONLY)
5834 GEN_PRIV(ctx);
5835 #else
5836 CHK_SV(ctx);
5837 switch (rB(ctx->opcode)) {
5838 case 0:
5839 case 1:
5840 case 2:
5841 {
5842 TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode));
5843 gen_helper_440_tlbwe(cpu_env, t0, cpu_gpr[rA(ctx->opcode)],
5844 cpu_gpr[rS(ctx->opcode)]);
5845 }
5846 break;
5847 default:
5848 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5849 break;
5850 }
5851 #endif /* defined(CONFIG_USER_ONLY) */
5852 }
5853
/* TLB management - PowerPC BookE 2.06 implementation */

/* tlbre - read the TLB entry selected by MAS registers (all in helper). */
static void gen_tlbre_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_booke206_tlbre(cpu_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
5866
/* tlbsx - tlbsx. - search TLB for EA = (rA|0) + rB; results go to MAS regs. */
static void gen_tlbsx_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    /*
     * EA is rA + rB, or just rB when rA is 0.  Computed by hand rather
     * than via gen_addr_reg_index() because the rA field here sits in
     * the rS position of the encoding.
     */
    if (rA(ctx->opcode)) {
        t0 = tcg_temp_new();
        tcg_gen_add_tl(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    } else {
        t0 = cpu_gpr[rB(ctx->opcode)];
    }
    gen_helper_booke206_tlbsx(cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5885
/* tlbwe - write the TLB entry described by the MAS registers. */
static void gen_tlbwe_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_booke206_tlbwe(cpu_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
5896
/* tlbivax - invalidate TLB entries matching EA = (rA|0) + rB. */
static void gen_tlbivax_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_booke206_tlbivax(cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5910
/* tlbilx - local TLB invalidate; opcode bits 21-22 select the T variant. */
static void gen_tlbilx_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);

    /* T field: 0 = all, 1 = by PID, 3 = by EA; 2 is reserved. */
    switch ((ctx->opcode >> 21) & 0x3) {
    case 0:
        gen_helper_booke206_tlbilx0(cpu_env, t0);
        break;
    case 1:
        gen_helper_booke206_tlbilx1(cpu_env, t0);
        break;
    case 3:
        gen_helper_booke206_tlbilx3(cpu_env, t0);
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5938
/* wrtee - copy bit MSR[EE] from register rD into the MSR (supervisor only). */
static void gen_wrtee(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* Isolate the EE bit of rD and splice it into MSR. */
    tcg_gen_andi_tl(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE));
    tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
    tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
    gen_ppc_maybe_interrupt(ctx);
    /*
     * Stop translation to have a chance to raise an exception if we
     * just set msr_ee to 1
     */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif /* defined(CONFIG_USER_ONLY) */
}
5960
/* wrteei - set MSR[EE] from an immediate bit in the opcode (supervisor). */
static void gen_wrteei(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Bit 0x00008000 of the opcode holds the new EE value. */
    if (ctx->opcode & 0x00008000) {
        tcg_gen_ori_tl(cpu_msr, cpu_msr, (1 << MSR_EE));
        gen_ppc_maybe_interrupt(ctx);
        /* Stop translation to have a chance to raise an exception */
        ctx->base.is_jmp = DISAS_EXIT_UPDATE;
    } else {
        tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5978
5979 /* PowerPC 440 specific instructions */
5980
5981 /* dlmzb */
5982 static void gen_dlmzb(DisasContext *ctx)
5983 {
5984 TCGv_i32 t0 = tcg_const_i32(Rc(ctx->opcode));
5985 gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_env,
5986 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
5987 }
5988
/* mbar replaces eieio on 440 */
static void gen_mbar(DisasContext *ctx)
{
    /* interpreted as no-op */
}
5994
/* msync replaces sync on 440 */
static void gen_msync_4xx(DisasContext *ctx)
{
    /* Only e500 seems to treat reserved bits as invalid */
    if ((ctx->insns_flags2 & PPC2_BOOKE206) &&
        (ctx->opcode & 0x03FFF801)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
    }
    /* otherwise interpreted as no-op */
}
6005
/* icbt - instruction cache block touch (440 form). */
static void gen_icbt_440(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     * does not generate any exception
     */
}
6015
#if defined(TARGET_PPC64)
/* maddld: rD = low 64 bits of (rA * rB) + rC. */
static void gen_maddld(DisasContext *ctx)
{
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mul_i64(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_add_i64(cpu_gpr[rD(ctx->opcode)], t1, cpu_gpr[rC(ctx->opcode)]);
}
6024
/* maddhd maddhdu - rD = high 64 bits of (rA * rB) + rC. */
static void gen_maddhd_maddhdu(DisasContext *ctx)
{
    TCGv_i64 lo = tcg_temp_new_i64();
    TCGv_i64 hi = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* The Rc encoding bit distinguishes the unsigned form (maddhdu). */
    if (Rc(ctx->opcode)) {
        tcg_gen_mulu2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
                          cpu_gpr[rB(ctx->opcode)]);
        tcg_gen_movi_i64(t1, 0);
    } else {
        tcg_gen_muls2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
                          cpu_gpr[rB(ctx->opcode)]);
        /* Sign-extend rC so the 64-bit addend acts as a 128-bit value. */
        tcg_gen_sari_i64(t1, cpu_gpr[rC(ctx->opcode)], 63);
    }
    /* 128-bit add; the low half (t1) is discarded, rD gets the high half. */
    tcg_gen_add2_i64(t1, cpu_gpr[rD(ctx->opcode)], lo, hi,
                     cpu_gpr[rC(ctx->opcode)], t1);
}
#endif /* defined(TARGET_PPC64) */
6045
/* tbegin - transaction begin; raises a facility-unavailable error if TM off. */
static void gen_tbegin(DisasContext *ctx)
{
    if (unlikely(!ctx->tm_enabled)) {
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
        return;
    }
    gen_helper_tbegin(cpu_env);
}
6054
/*
 * Transactional-memory user-space instructions.  Since tbegin always
 * fails in QEMU, these reduce to "clear CR0" after the TM-enabled check.
 */
#define GEN_TM_NOOP(name) \
static inline void gen_##name(DisasContext *ctx) \
{ \
    if (unlikely(!ctx->tm_enabled)) { \
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); \
        return; \
    } \
    /* \
     * Because tbegin always fails in QEMU, these user \
     * space instructions all have a simple implementation: \
     * \
     * CR[0] = 0b0 || MSR[TS] || 0b0 \
     *       = 0b0 || 0b00 || 0b0 \
     */ \
    tcg_gen_movi_i32(cpu_crf[0], 0); \
}

GEN_TM_NOOP(tend);
GEN_TM_NOOP(tabort);
GEN_TM_NOOP(tabortwc);
GEN_TM_NOOP(tabortwci);
GEN_TM_NOOP(tabortdc);
GEN_TM_NOOP(tabortdci);
GEN_TM_NOOP(tsr);
6079
/* cp_abort - copy-paste abort; nothing to do without a copy buffer. */
static inline void gen_cp_abort(DisasContext *ctx)
{
    /* Do Nothing */
}
6084
/* copy/paste are unimplemented: both raise an invalid-instruction error. */
#define GEN_CP_PASTE_NOOP(name) \
static inline void gen_##name(DisasContext *ctx) \
{ \
    /* \
     * Generate invalid exception until we have an \
     * implementation of the copy paste facility \
     */ \
    gen_invalid(ctx); \
}

GEN_CP_PASTE_NOOP(copy)
GEN_CP_PASTE_NOOP(paste)
6097
/* tcheck - transaction check; always reports "doomed" since tbegin fails. */
static void gen_tcheck(DisasContext *ctx)
{
    if (unlikely(!ctx->tm_enabled)) {
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
        return;
    }
    /*
     * Because tbegin always fails, the tcheck implementation is
     * simple:
     *
     * CR[CRF] = TDOOMED || MSR[TS] || 0b0
     *         = 0b1 || 0b00 || 0b0
     */
    tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0x8);
}
6113
/*
 * Privileged TM instructions: user-only builds always raise a privilege
 * error; system builds check SV + TM-enabled, then clear CR0 like the
 * non-privileged TM no-ops above.
 */
#if defined(CONFIG_USER_ONLY)
#define GEN_TM_PRIV_NOOP(name) \
static inline void gen_##name(DisasContext *ctx) \
{ \
    gen_priv_opc(ctx); \
}

#else

#define GEN_TM_PRIV_NOOP(name) \
static inline void gen_##name(DisasContext *ctx) \
{ \
    CHK_SV(ctx); \
    if (unlikely(!ctx->tm_enabled)) { \
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); \
        return; \
    } \
    /* \
     * Because tbegin always fails, the implementation is \
     * simple: \
     * \
     * CR[0] = 0b0 || MSR[TS] || 0b0 \
     *       = 0b0 || 0b00 | 0b0 \
     */ \
    tcg_gen_movi_i32(cpu_crf[0], 0); \
}

#endif

GEN_TM_PRIV_NOOP(treclaim);
GEN_TM_PRIV_NOOP(trechkpt);
6145
/* Load FPR 'regno' (doubleword 0 of the corresponding VSR) into dst. */
static inline void get_fpr(TCGv_i64 dst, int regno)
{
    tcg_gen_ld_i64(dst, cpu_env, fpr_offset(regno));
}
6150
/* Store src into FPR 'regno' and zero doubleword 1 of the backing VSR. */
static inline void set_fpr(int regno, TCGv_i64 src)
{
    tcg_gen_st_i64(src, cpu_env, fpr_offset(regno));
    /*
     * Before PowerISA v3.1 the result of doubleword 1 of the VSR
     * corresponding to the target FPR was undefined. However,
     * most (if not all) real hardware were setting the result to 0.
     * Starting at ISA v3.1, the result for doubleword 1 is now defined
     * to be 0.
     */
    tcg_gen_st_i64(tcg_constant_i64(0), cpu_env, vsr64_offset(regno, false));
}
6163
/* Load the high or low doubleword of Altivec register 'regno' into dst. */
static inline void get_avr64(TCGv_i64 dst, int regno, bool high)
{
    tcg_gen_ld_i64(dst, cpu_env, avr64_offset(regno, high));
}
6168
/* Store src into the high or low doubleword of Altivec register 'regno'. */
static inline void set_avr64(int regno, TCGv_i64 src, bool high)
{
    tcg_gen_st_i64(src, cpu_env, avr64_offset(regno, high));
}
6173
6174 /*
6175 * Helpers for decodetree used by !function for decoding arguments.
6176 */
6177 static int times_2(DisasContext *ctx, int x)
6178 {
6179 return x * 2;
6180 }
6181
6182 static int times_4(DisasContext *ctx, int x)
6183 {
6184 return x * 4;
6185 }
6186
6187 static int times_16(DisasContext *ctx, int x)
6188 {
6189 return x * 16;
6190 }
6191
/*
 * Compose a doubleword effective address: deposit the 6-bit field x into
 * bits 3-8 of the fixed base 0xfffffffffffffe00.
 */
static int64_t dw_compose_ea(DisasContext *ctx, int x)
{
    return deposit64(0xfffffffffffffe00, 3, 6, x);
}
6196
/*
 * Helpers for trans_* functions to check for specific insns flags.
 * Use token pasting to ensure that we use the proper flag with the
 * proper variable.  Returning false makes the decoder treat the
 * instruction as not provided by this CPU model.
 */
#define REQUIRE_INSNS_FLAGS(CTX, NAME) \
    do {                                                \
        if (((CTX)->insns_flags & PPC_##NAME) == 0) {   \
            return false;                               \
        }                                               \
    } while (0)

#define REQUIRE_INSNS_FLAGS2(CTX, NAME) \
    do {                                                \
        if (((CTX)->insns_flags2 & PPC2_##NAME) == 0) { \
            return false;                               \
        }                                               \
    } while (0)

/* Then special-case the check for 64-bit so that we elide code for ppc32. */
#if TARGET_LONG_BITS == 32
# define REQUIRE_64BIT(CTX)  return false
#else
# define REQUIRE_64BIT(CTX)  REQUIRE_INSNS_FLAGS(CTX, 64B)
#endif
6222
/*
 * Facility-enable checks: raise the matching unavailable exception and
 * return true (= instruction handled) when the facility is disabled.
 */
#define REQUIRE_VECTOR(CTX) \
    do {                                                \
        if (unlikely(!(CTX)->altivec_enabled)) {        \
            gen_exception((CTX), POWERPC_EXCP_VPU);     \
            return true;                                \
        }                                               \
    } while (0)

#define REQUIRE_VSX(CTX) \
    do {                                                \
        if (unlikely(!(CTX)->vsx_enabled)) {            \
            gen_exception((CTX), POWERPC_EXCP_VSXU);    \
            return true;                                \
        }                                               \
    } while (0)

#define REQUIRE_FPU(ctx) \
    do {                                                \
        if (unlikely(!(ctx)->fpu_enabled)) {            \
            gen_exception((ctx), POWERPC_EXCP_FPU);     \
            return true;                                \
        }                                               \
    } while (0)
6246
/*
 * Privilege checks: supervisor (SV) and hypervisor (HV).  In user-only
 * builds every privileged instruction simply raises a privilege error.
 */
#if !defined(CONFIG_USER_ONLY)
#define REQUIRE_SV(CTX)             \
    do {                            \
        if (unlikely((CTX)->pr)) {  \
            gen_priv_opc(CTX);      \
            return true;            \
        }                           \
    } while (0)

#define REQUIRE_HV(CTX)                             \
    do {                                            \
        if (unlikely((CTX)->pr || !(CTX)->hv)) {    \
            gen_priv_opc(CTX);                      \
            return true;                            \
        }                                           \
    } while (0)
#else
#define REQUIRE_SV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
#define REQUIRE_HV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
#endif
6267
/*
 * Helpers for implementing sets of trans_* functions.
 * Defer the implementation of NAME to FUNC, with optional extra arguments.
 * The _FLAGS/_FLAGS2/64 variants additionally gate the instruction on the
 * CPU's insns_flags / insns_flags2 / 64-bit support.
 */
#define TRANS(NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    { return FUNC(ctx, a, __VA_ARGS__); }
#define TRANS_FLAGS(FLAGS, NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    {                                                          \
        REQUIRE_INSNS_FLAGS(ctx, FLAGS);                       \
        return FUNC(ctx, a, __VA_ARGS__);                      \
    }
#define TRANS_FLAGS2(FLAGS2, NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    {                                                          \
        REQUIRE_INSNS_FLAGS2(ctx, FLAGS2);                     \
        return FUNC(ctx, a, __VA_ARGS__);                      \
    }

#define TRANS64(NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    { REQUIRE_64BIT(ctx); return FUNC(ctx, a, __VA_ARGS__); }
#define TRANS64_FLAGS2(FLAGS2, NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    {                                                          \
        REQUIRE_64BIT(ctx);                                    \
        REQUIRE_INSNS_FLAGS2(ctx, FLAGS2);                     \
        return FUNC(ctx, a, __VA_ARGS__);                      \
    }
6298
6299 /* TODO: More TRANS* helpers for extra insn_flags checks. */
6300
6301
6302 #include "decode-insn32.c.inc"
6303 #include "decode-insn64.c.inc"
6304 #include "power8-pmu-regs.c.inc"
6305
6306 /*
6307 * Incorporate CIA into the constant when R=1.
6308 * Validate that when R=1, RA=0.
6309 */
6310 static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a)
6311 {
6312 d->rt = a->rt;
6313 d->ra = a->ra;
6314 d->si = a->si;
6315 if (a->r) {
6316 if (unlikely(a->ra != 0)) {
6317 gen_invalid(ctx);
6318 return false;
6319 }
6320 d->si += ctx->cia;
6321 }
6322 return true;
6323 }
6324
6325 #include "translate/fixedpoint-impl.c.inc"
6326
6327 #include "translate/fp-impl.c.inc"
6328
6329 #include "translate/vmx-impl.c.inc"
6330
6331 #include "translate/vsx-impl.c.inc"
6332
6333 #include "translate/dfp-impl.c.inc"
6334
6335 #include "translate/spe-impl.c.inc"
6336
6337 #include "translate/branch-impl.c.inc"
6338
6339 #include "translate/processor-ctrl-impl.c.inc"
6340
6341 #include "translate/storage-ctrl-impl.c.inc"
6342
6343 /* Handles lfdp */
6344 static void gen_dform39(DisasContext *ctx)
6345 {
6346 if ((ctx->opcode & 0x3) == 0) {
6347 if (ctx->insns_flags2 & PPC2_ISA205) {
6348 return gen_lfdp(ctx);
6349 }
6350 }
6351 return gen_invalid(ctx);
6352 }
6353
6354 /* Handles stfdp */
6355 static void gen_dform3D(DisasContext *ctx)
6356 {
6357 if ((ctx->opcode & 3) == 0) { /* DS-FORM */
6358 /* stfdp */
6359 if (ctx->insns_flags2 & PPC2_ISA205) {
6360 return gen_stfdp(ctx);
6361 }
6362 }
6363 return gen_invalid(ctx);
6364 }
6365
#if defined(TARGET_PPC64)
/* brd - byte-reverse the full doubleword: rA = bswap64(rS). */
static void gen_brd(DisasContext *ctx)
{
    tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
6372
/* brw - byte-reverse each word: bswap64 then rotate by 32 to restore order. */
static void gen_brw(DisasContext *ctx)
{
    tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    tcg_gen_rotli_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 32);

}
6380
6381 /* brh */
6382 static void gen_brh(DisasContext *ctx)
6383 {
6384 TCGv_i64 mask = tcg_constant_i64(0x00ff00ff00ff00ffull);
6385 TCGv_i64 t1 = tcg_temp_new_i64();
6386 TCGv_i64 t2 = tcg_temp_new_i64();
6387
6388 tcg_gen_shri_i64(t1, cpu_gpr[rS(ctx->opcode)], 8);
6389 tcg_gen_and_i64(t2, t1, mask);
6390 tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], mask);
6391 tcg_gen_shli_i64(t1, t1, 8);
6392 tcg_gen_or_i64(cpu_gpr[rA(ctx->opcode)], t1, t2);
6393 }
6394 #endif
6395
6396 static opcode_t opcodes[] = {
6397 #if defined(TARGET_PPC64)
6398 GEN_HANDLER_E(brd, 0x1F, 0x1B, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA310),
6399 GEN_HANDLER_E(brw, 0x1F, 0x1B, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA310),
6400 GEN_HANDLER_E(brh, 0x1F, 0x1B, 0x06, 0x0000F801, PPC_NONE, PPC2_ISA310),
6401 #endif
6402 GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE),
6403 #if defined(TARGET_PPC64)
6404 GEN_HANDLER_E(cmpeqb, 0x1F, 0x00, 0x07, 0x00600000, PPC_NONE, PPC2_ISA300),
6405 #endif
6406 GEN_HANDLER_E(cmpb, 0x1F, 0x1C, 0x0F, 0x00000001, PPC_NONE, PPC2_ISA205),
6407 GEN_HANDLER_E(cmprb, 0x1F, 0x00, 0x06, 0x00400001, PPC_NONE, PPC2_ISA300),
6408 GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL),
6409 GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6410 GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6411 GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER),
6412 GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER),
6413 GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER),
6414 GEN_HANDLER(mullwo, 0x1F, 0x0B, 0x17, 0x00000000, PPC_INTEGER),
6415 GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6416 #if defined(TARGET_PPC64)
6417 GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B),
6418 #endif
6419 GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER),
6420 GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER),
6421 GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6422 GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6423 GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6424 GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER),
6425 GEN_HANDLER_E(cnttzw, 0x1F, 0x1A, 0x10, 0x00000000, PPC_NONE, PPC2_ISA300),
6426 GEN_HANDLER_E(copy, 0x1F, 0x06, 0x18, 0x03C00001, PPC_NONE, PPC2_ISA300),
6427 GEN_HANDLER_E(cp_abort, 0x1F, 0x06, 0x1A, 0x03FFF801, PPC_NONE, PPC2_ISA300),
6428 GEN_HANDLER_E(paste, 0x1F, 0x06, 0x1C, 0x03C00000, PPC_NONE, PPC2_ISA300),
6429 GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER),
6430 GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER),
6431 GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6432 GEN_HANDLER(oris, 0x19, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6433 GEN_HANDLER(xori, 0x1A, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6434 GEN_HANDLER(xoris, 0x1B, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6435 GEN_HANDLER(popcntb, 0x1F, 0x1A, 0x03, 0x0000F801, PPC_POPCNTB),
6436 GEN_HANDLER(popcntw, 0x1F, 0x1A, 0x0b, 0x0000F801, PPC_POPCNTWD),
6437 GEN_HANDLER_E(prtyw, 0x1F, 0x1A, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA205),
6438 #if defined(TARGET_PPC64)
6439 GEN_HANDLER(popcntd, 0x1F, 0x1A, 0x0F, 0x0000F801, PPC_POPCNTWD),
6440 GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B),
6441 GEN_HANDLER_E(cnttzd, 0x1F, 0x1A, 0x11, 0x00000000, PPC_NONE, PPC2_ISA300),
6442 GEN_HANDLER_E(darn, 0x1F, 0x13, 0x17, 0x001CF801, PPC_NONE, PPC2_ISA300),
6443 GEN_HANDLER_E(prtyd, 0x1F, 0x1A, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA205),
6444 GEN_HANDLER_E(bpermd, 0x1F, 0x1C, 0x07, 0x00000001, PPC_NONE, PPC2_PERM_ISA206),
6445 #endif
6446 GEN_HANDLER(rlwimi, 0x14, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6447 GEN_HANDLER(rlwinm, 0x15, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6448 GEN_HANDLER(rlwnm, 0x17, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6449 GEN_HANDLER(slw, 0x1F, 0x18, 0x00, 0x00000000, PPC_INTEGER),
6450 GEN_HANDLER(sraw, 0x1F, 0x18, 0x18, 0x00000000, PPC_INTEGER),
6451 GEN_HANDLER(srawi, 0x1F, 0x18, 0x19, 0x00000000, PPC_INTEGER),
6452 GEN_HANDLER(srw, 0x1F, 0x18, 0x10, 0x00000000, PPC_INTEGER),
6453 #if defined(TARGET_PPC64)
6454 GEN_HANDLER(sld, 0x1F, 0x1B, 0x00, 0x00000000, PPC_64B),
6455 GEN_HANDLER(srad, 0x1F, 0x1A, 0x18, 0x00000000, PPC_64B),
6456 GEN_HANDLER2(sradi0, "sradi", 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B),
6457 GEN_HANDLER2(sradi1, "sradi", 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B),
6458 GEN_HANDLER(srd, 0x1F, 0x1B, 0x10, 0x00000000, PPC_64B),
6459 GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000,
6460 PPC_NONE, PPC2_ISA300),
6461 GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000,
6462 PPC_NONE, PPC2_ISA300),
6463 #endif
6464 /* handles lfdp, lxsd, lxssp */
6465 GEN_HANDLER_E(dform39, 0x39, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
6466 /* handles stfdp, stxsd, stxssp */
6467 GEN_HANDLER_E(dform3D, 0x3D, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
6468 GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6469 GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6470 GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING),
6471 GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING),
6472 GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING),
6473 GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING),
6474 GEN_HANDLER(eieio, 0x1F, 0x16, 0x1A, 0x01FFF801, PPC_MEM_EIEIO),
6475 GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM),
6476 GEN_HANDLER_E(lbarx, 0x1F, 0x14, 0x01, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
6477 GEN_HANDLER_E(lharx, 0x1F, 0x14, 0x03, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
6478 GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000000, PPC_RES),
6479 GEN_HANDLER_E(lwat, 0x1F, 0x06, 0x12, 0x00000001, PPC_NONE, PPC2_ISA300),
6480 GEN_HANDLER_E(stwat, 0x1F, 0x06, 0x16, 0x00000001, PPC_NONE, PPC2_ISA300),
6481 GEN_HANDLER_E(stbcx_, 0x1F, 0x16, 0x15, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
6482 GEN_HANDLER_E(sthcx_, 0x1F, 0x16, 0x16, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
6483 GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES),
6484 #if defined(TARGET_PPC64)
6485 GEN_HANDLER_E(ldat, 0x1F, 0x06, 0x13, 0x00000001, PPC_NONE, PPC2_ISA300),
6486 GEN_HANDLER_E(stdat, 0x1F, 0x06, 0x17, 0x00000001, PPC_NONE, PPC2_ISA300),
6487 GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000000, PPC_64B),
6488 GEN_HANDLER_E(lqarx, 0x1F, 0x14, 0x08, 0, PPC_NONE, PPC2_LSQ_ISA207),
6489 GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B),
6490 GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207),
6491 #endif
6492 GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC),
6493 /* ISA v3.0 changed the extended opcode from 62 to 30 */
6494 GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x039FF801, PPC_WAIT),
6495 GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039CF801, PPC_NONE, PPC2_ISA300),
6496 GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
6497 GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
6498 GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW),
6499 GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW),
6500 GEN_HANDLER_E(bctar, 0x13, 0x10, 0x11, 0x0000E000, PPC_NONE, PPC2_BCTAR_ISA207),
6501 GEN_HANDLER(mcrf, 0x13, 0x00, 0xFF, 0x00000001, PPC_INTEGER),
6502 GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW),
6503 #if defined(TARGET_PPC64)
6504 GEN_HANDLER(rfid, 0x13, 0x12, 0x00, 0x03FF8001, PPC_64B),
6505 #if !defined(CONFIG_USER_ONLY)
6506 /* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */
6507 GEN_HANDLER_E(scv, 0x11, 0x10, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300),
6508 GEN_HANDLER_E(scv, 0x11, 0x00, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300),
6509 GEN_HANDLER_E(rfscv, 0x13, 0x12, 0x02, 0x03FF8001, PPC_NONE, PPC2_ISA300),
6510 #endif
6511 GEN_HANDLER_E(stop, 0x13, 0x12, 0x0b, 0x03FFF801, PPC_NONE, PPC2_ISA300),
6512 GEN_HANDLER_E(doze, 0x13, 0x12, 0x0c, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
6513 GEN_HANDLER_E(nap, 0x13, 0x12, 0x0d, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
6514 GEN_HANDLER_E(sleep, 0x13, 0x12, 0x0e, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
6515 GEN_HANDLER_E(rvwinkle, 0x13, 0x12, 0x0f, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
6516 GEN_HANDLER(hrfid, 0x13, 0x12, 0x08, 0x03FF8001, PPC_64H),
6517 #endif
6518 /* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */
6519 GEN_HANDLER(sc, 0x11, 0x11, 0xFF, 0x03FFF01D, PPC_FLOW),
6520 GEN_HANDLER(sc, 0x11, 0x01, 0xFF, 0x03FFF01D, PPC_FLOW),
6521 GEN_HANDLER(tw, 0x1F, 0x04, 0x00, 0x00000001, PPC_FLOW),
6522 GEN_HANDLER(twi, 0x03, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
6523 #if defined(TARGET_PPC64)
6524 GEN_HANDLER(td, 0x1F, 0x04, 0x02, 0x00000001, PPC_64B),
6525 GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B),
6526 #endif
6527 GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC),
6528 GEN_HANDLER(mfcr, 0x1F, 0x13, 0x00, 0x00000801, PPC_MISC),
6529 GEN_HANDLER(mfmsr, 0x1F, 0x13, 0x02, 0x001FF801, PPC_MISC),
6530 GEN_HANDLER(mfspr, 0x1F, 0x13, 0x0A, 0x00000001, PPC_MISC),
6531 GEN_HANDLER(mftb, 0x1F, 0x13, 0x0B, 0x00000001, PPC_MFTB),
6532 GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC),
6533 #if defined(TARGET_PPC64)
6534 GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B),
6535 GEN_HANDLER_E(setb, 0x1F, 0x00, 0x04, 0x0003F801, PPC_NONE, PPC2_ISA300),
6536 GEN_HANDLER_E(mcrxrx, 0x1F, 0x00, 0x12, 0x007FF801, PPC_NONE, PPC2_ISA300),
6537 #endif
6538 GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001EF801, PPC_MISC),
6539 GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC),
6540 GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE),
6541 GEN_HANDLER_E(dcbfep, 0x1F, 0x1F, 0x03, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
6542 GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE),
6543 GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE),
6544 GEN_HANDLER_E(dcbstep, 0x1F, 0x1F, 0x01, 0x03E00001, PPC_NONE, PPC2_BOOKE206),
6545 GEN_HANDLER(dcbt, 0x1F, 0x16, 0x08, 0x00000001, PPC_CACHE),
6546 GEN_HANDLER_E(dcbtep, 0x1F, 0x1F, 0x09, 0x00000001, PPC_NONE, PPC2_BOOKE206),
6547 GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x00000001, PPC_CACHE),
6548 GEN_HANDLER_E(dcbtstep, 0x1F, 0x1F, 0x07, 0x00000001, PPC_NONE, PPC2_BOOKE206),
6549 GEN_HANDLER_E(dcbtls, 0x1F, 0x06, 0x05, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
6550 GEN_HANDLER_E(dcblc, 0x1F, 0x06, 0x0c, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
6551 GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZ),
6552 GEN_HANDLER_E(dcbzep, 0x1F, 0x1F, 0x1F, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
6553 GEN_HANDLER(dst, 0x1F, 0x16, 0x0A, 0x01800001, PPC_ALTIVEC),
6554 GEN_HANDLER(dstst, 0x1F, 0x16, 0x0B, 0x01800001, PPC_ALTIVEC),
6555 GEN_HANDLER(dss, 0x1F, 0x16, 0x19, 0x019FF801, PPC_ALTIVEC),
6556 GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE_ICBI),
6557 GEN_HANDLER_E(icbiep, 0x1F, 0x1F, 0x1E, 0x03E00001, PPC_NONE, PPC2_BOOKE206),
6558 GEN_HANDLER(dcba, 0x1F, 0x16, 0x17, 0x03E00001, PPC_CACHE_DCBA),
6559 GEN_HANDLER(mfsr, 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT),
6560 GEN_HANDLER(mfsrin, 0x1F, 0x13, 0x14, 0x001F0001, PPC_SEGMENT),
6561 GEN_HANDLER(mtsr, 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT),
6562 GEN_HANDLER(mtsrin, 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT),
6563 #if defined(TARGET_PPC64)
6564 GEN_HANDLER2(mfsr_64b, "mfsr", 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT_64B),
6565 GEN_HANDLER2(mfsrin_64b, "mfsrin", 0x1F, 0x13, 0x14, 0x001F0001,
6566 PPC_SEGMENT_64B),
6567 GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B),
6568 GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 0x001F0001,
6569 PPC_SEGMENT_64B),
6570 #endif
6571 GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA),
6572 /*
6573 * XXX Those instructions will need to be handled differently for
6574 * different ISA versions
6575 */
6576 GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC),
6577 GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN),
6578 GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN),
6579 GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB),
6580 GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB),
6581 GEN_HANDLER(mfapidi, 0x1F, 0x13, 0x08, 0x0000F801, PPC_MFAPIDI),
6582 GEN_HANDLER(tlbiva, 0x1F, 0x12, 0x18, 0x03FFF801, PPC_TLBIVA),
6583 GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR),
6584 GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR),
6585 GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX),
6586 GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX),
6587 GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON),
6588 GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON),
6589 GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT),
6590 GEN_HANDLER(iccci, 0x1F, 0x06, 0x1E, 0x00000001, PPC_4xx_COMMON),
6591 GEN_HANDLER(icread, 0x1F, 0x06, 0x1F, 0x03E00001, PPC_4xx_COMMON),
6592 GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP),
6593 GEN_HANDLER_E(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE, PPC2_BOOKE206),
6594 GEN_HANDLER(rfdi, 0x13, 0x07, 0x01, 0x03FF8001, PPC_RFDI),
6595 GEN_HANDLER(rfmci, 0x13, 0x06, 0x01, 0x03FF8001, PPC_RFMCI),
6596 GEN_HANDLER2(tlbre_40x, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_40x_TLB),
6597 GEN_HANDLER2(tlbsx_40x, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_40x_TLB),
6598 GEN_HANDLER2(tlbwe_40x, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_40x_TLB),
6599 GEN_HANDLER2(tlbre_440, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_BOOKE),
6600 GEN_HANDLER2(tlbsx_440, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_BOOKE),
6601 GEN_HANDLER2(tlbwe_440, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_BOOKE),
6602 GEN_HANDLER2_E(tlbre_booke206, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001,
6603 PPC_NONE, PPC2_BOOKE206),
6604 GEN_HANDLER2_E(tlbsx_booke206, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000,
6605 PPC_NONE, PPC2_BOOKE206),
6606 GEN_HANDLER2_E(tlbwe_booke206, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001,
6607 PPC_NONE, PPC2_BOOKE206),
6608 GEN_HANDLER2_E(tlbivax_booke206, "tlbivax", 0x1F, 0x12, 0x18, 0x00000001,
6609 PPC_NONE, PPC2_BOOKE206),
6610 GEN_HANDLER2_E(tlbilx_booke206, "tlbilx", 0x1F, 0x12, 0x00, 0x03800001,
6611 PPC_NONE, PPC2_BOOKE206),
6612 GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE),
6613 GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE),
6614 GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC),
6615 GEN_HANDLER_E(mbar, 0x1F, 0x16, 0x1a, 0x001FF801,
6616 PPC_BOOKE, PPC2_BOOKE206),
6617 GEN_HANDLER(msync_4xx, 0x1F, 0x16, 0x12, 0x039FF801, PPC_BOOKE),
6618 GEN_HANDLER2_E(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001,
6619 PPC_BOOKE, PPC2_BOOKE206),
6620 GEN_HANDLER2(icbt_440, "icbt", 0x1F, 0x06, 0x08, 0x03E00001,
6621 PPC_440_SPEC),
6622 GEN_HANDLER(lvsl, 0x1f, 0x06, 0x00, 0x00000001, PPC_ALTIVEC),
6623 GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC),
6624 GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC),
6625 GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC),
6626 #if defined(TARGET_PPC64)
6627 GEN_HANDLER_E(maddhd_maddhdu, 0x04, 0x18, 0xFF, 0x00000000, PPC_NONE,
6628 PPC2_ISA300),
6629 GEN_HANDLER_E(maddld, 0x04, 0x19, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300),
6630 #endif
6631
6632 #undef GEN_INT_ARITH_ADD
6633 #undef GEN_INT_ARITH_ADD_CONST
6634 #define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \
6635 GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x00000000, PPC_INTEGER),
6636 #define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \
6637 add_ca, compute_ca, compute_ov) \
6638 GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x0000F800, PPC_INTEGER),
6639 GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
6640 GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
6641 GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
6642 GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
6643 GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
6644 GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
6645 GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
6646 GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
6647 GEN_HANDLER_E(addex, 0x1F, 0x0A, 0x05, 0x00000000, PPC_NONE, PPC2_ISA300),
6648 GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
6649 GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
6650
6651 #undef GEN_INT_ARITH_DIVW
6652 #define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \
6653 GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER)
6654 GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0),
6655 GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1),
6656 GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0),
6657 GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1),
6658 GEN_HANDLER_E(divwe, 0x1F, 0x0B, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
6659 GEN_HANDLER_E(divweo, 0x1F, 0x0B, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
6660 GEN_HANDLER_E(divweu, 0x1F, 0x0B, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
6661 GEN_HANDLER_E(divweuo, 0x1F, 0x0B, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
6662 GEN_HANDLER_E(modsw, 0x1F, 0x0B, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
6663 GEN_HANDLER_E(moduw, 0x1F, 0x0B, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),
6664
6665 #if defined(TARGET_PPC64)
6666 #undef GEN_INT_ARITH_DIVD
6667 #define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \
6668 GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
6669 GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0),
6670 GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1),
6671 GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0),
6672 GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1),
6673
6674 GEN_HANDLER_E(divdeu, 0x1F, 0x09, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
6675 GEN_HANDLER_E(divdeuo, 0x1F, 0x09, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
6676 GEN_HANDLER_E(divde, 0x1F, 0x09, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
6677 GEN_HANDLER_E(divdeo, 0x1F, 0x09, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
6678 GEN_HANDLER_E(modsd, 0x1F, 0x09, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
6679 GEN_HANDLER_E(modud, 0x1F, 0x09, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),
6680
6681 #undef GEN_INT_ARITH_MUL_HELPER
6682 #define GEN_INT_ARITH_MUL_HELPER(name, opc3) \
6683 GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
6684 GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00),
6685 GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02),
6686 GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17),
6687 #endif
6688
6689 #undef GEN_INT_ARITH_SUBF
6690 #undef GEN_INT_ARITH_SUBF_CONST
6691 #define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \
6692 GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x00000000, PPC_INTEGER),
6693 #define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \
6694 add_ca, compute_ca, compute_ov) \
6695 GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x0000F800, PPC_INTEGER),
6696 GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
6697 GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
6698 GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
6699 GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
6700 GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
6701 GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
6702 GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
6703 GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
6704 GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
6705 GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
6706
6707 #undef GEN_LOGICAL1
6708 #undef GEN_LOGICAL2
6709 #define GEN_LOGICAL2(name, tcg_op, opc, type) \
6710 GEN_HANDLER(name, 0x1F, 0x1C, opc, 0x00000000, type)
6711 #define GEN_LOGICAL1(name, tcg_op, opc, type) \
6712 GEN_HANDLER(name, 0x1F, 0x1A, opc, 0x00000000, type)
6713 GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER),
6714 GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER),
6715 GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER),
6716 GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER),
6717 GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER),
6718 GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER),
6719 GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER),
6720 GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER),
6721 #if defined(TARGET_PPC64)
6722 GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B),
6723 #endif
6724
6725 #if defined(TARGET_PPC64)
6726 #undef GEN_PPC64_R2
6727 #undef GEN_PPC64_R4
6728 #define GEN_PPC64_R2(name, opc1, opc2) \
6729 GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
6730 GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \
6731 PPC_64B)
6732 #define GEN_PPC64_R4(name, opc1, opc2) \
6733 GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
6734 GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x01, 0xFF, 0x00000000, \
6735 PPC_64B), \
6736 GEN_HANDLER2(name##2, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \
6737 PPC_64B), \
6738 GEN_HANDLER2(name##3, stringify(name), opc1, opc2 | 0x11, 0xFF, 0x00000000, \
6739 PPC_64B)
6740 GEN_PPC64_R4(rldicl, 0x1E, 0x00),
6741 GEN_PPC64_R4(rldicr, 0x1E, 0x02),
6742 GEN_PPC64_R4(rldic, 0x1E, 0x04),
6743 GEN_PPC64_R2(rldcl, 0x1E, 0x08),
6744 GEN_PPC64_R2(rldcr, 0x1E, 0x09),
6745 GEN_PPC64_R4(rldimi, 0x1E, 0x06),
6746 #endif
6747
6748 #undef GEN_LDX_E
6749 #define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \
6750 GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000001, type, type2),
6751
6752 #if defined(TARGET_PPC64)
6753 GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)
6754
6755 /* HV/P7 and later only */
6756 GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
6757 GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST)
6758 GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
6759 GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
6760 #endif
6761 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER)
6762 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER)
6763
6764 /* External PID based load */
6765 #undef GEN_LDEPX
6766 #define GEN_LDEPX(name, ldop, opc2, opc3) \
6767 GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3, \
6768 0x00000001, PPC_NONE, PPC2_BOOKE206),
6769
6770 GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
6771 GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
6772 GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
6773 #if defined(TARGET_PPC64)
6774 GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
6775 #endif
6776
6777 #undef GEN_STX_E
6778 #define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk) \
6779 GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000000, type, type2),
6780
6781 #if defined(TARGET_PPC64)
6782 GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
6783 GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
6784 GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
6785 GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
6786 GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
6787 #endif
6788 GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER)
6789 GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER)
6790
6791 #undef GEN_STEPX
6792 #define GEN_STEPX(name, ldop, opc2, opc3) \
6793 GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3, \
6794 0x00000001, PPC_NONE, PPC2_BOOKE206),
6795
6796 GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
6797 GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
6798 GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
6799 #if defined(TARGET_PPC64)
6800 GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1D, 0x04)
6801 #endif
6802
6803 #undef GEN_CRLOGIC
6804 #define GEN_CRLOGIC(name, tcg_op, opc) \
6805 GEN_HANDLER(name, 0x13, 0x01, opc, 0x00000001, PPC_INTEGER)
6806 GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08),
6807 GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04),
6808 GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09),
6809 GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07),
6810 GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01),
6811 GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E),
6812 GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D),
6813 GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06),
6814
6815 #undef GEN_MAC_HANDLER
6816 #define GEN_MAC_HANDLER(name, opc2, opc3) \
6817 GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_405_MAC)
6818 GEN_MAC_HANDLER(macchw, 0x0C, 0x05),
6819 GEN_MAC_HANDLER(macchwo, 0x0C, 0x15),
6820 GEN_MAC_HANDLER(macchws, 0x0C, 0x07),
6821 GEN_MAC_HANDLER(macchwso, 0x0C, 0x17),
6822 GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06),
6823 GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16),
6824 GEN_MAC_HANDLER(macchwu, 0x0C, 0x04),
6825 GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14),
6826 GEN_MAC_HANDLER(machhw, 0x0C, 0x01),
6827 GEN_MAC_HANDLER(machhwo, 0x0C, 0x11),
6828 GEN_MAC_HANDLER(machhws, 0x0C, 0x03),
6829 GEN_MAC_HANDLER(machhwso, 0x0C, 0x13),
6830 GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02),
6831 GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12),
6832 GEN_MAC_HANDLER(machhwu, 0x0C, 0x00),
6833 GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10),
6834 GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D),
6835 GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D),
6836 GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F),
6837 GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F),
6838 GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C),
6839 GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C),
6840 GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E),
6841 GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E),
6842 GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05),
6843 GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15),
6844 GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07),
6845 GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17),
6846 GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01),
6847 GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11),
6848 GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03),
6849 GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13),
6850 GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D),
6851 GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D),
6852 GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F),
6853 GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F),
6854 GEN_MAC_HANDLER(mulchw, 0x08, 0x05),
6855 GEN_MAC_HANDLER(mulchwu, 0x08, 0x04),
6856 GEN_MAC_HANDLER(mulhhw, 0x08, 0x01),
6857 GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00),
6858 GEN_MAC_HANDLER(mullhw, 0x08, 0x0D),
6859 GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C),
6860
6861 GEN_HANDLER2_E(tbegin, "tbegin", 0x1F, 0x0E, 0x14, 0x01DFF800, \
6862 PPC_NONE, PPC2_TM),
6863 GEN_HANDLER2_E(tend, "tend", 0x1F, 0x0E, 0x15, 0x01FFF800, \
6864 PPC_NONE, PPC2_TM),
6865 GEN_HANDLER2_E(tabort, "tabort", 0x1F, 0x0E, 0x1C, 0x03E0F800, \
6866 PPC_NONE, PPC2_TM),
6867 GEN_HANDLER2_E(tabortwc, "tabortwc", 0x1F, 0x0E, 0x18, 0x00000000, \
6868 PPC_NONE, PPC2_TM),
6869 GEN_HANDLER2_E(tabortwci, "tabortwci", 0x1F, 0x0E, 0x1A, 0x00000000, \
6870 PPC_NONE, PPC2_TM),
6871 GEN_HANDLER2_E(tabortdc, "tabortdc", 0x1F, 0x0E, 0x19, 0x00000000, \
6872 PPC_NONE, PPC2_TM),
6873 GEN_HANDLER2_E(tabortdci, "tabortdci", 0x1F, 0x0E, 0x1B, 0x00000000, \
6874 PPC_NONE, PPC2_TM),
6875 GEN_HANDLER2_E(tsr, "tsr", 0x1F, 0x0E, 0x17, 0x03DFF800, \
6876 PPC_NONE, PPC2_TM),
6877 GEN_HANDLER2_E(tcheck, "tcheck", 0x1F, 0x0E, 0x16, 0x007FF800, \
6878 PPC_NONE, PPC2_TM),
6879 GEN_HANDLER2_E(treclaim, "treclaim", 0x1F, 0x0E, 0x1D, 0x03E0F800, \
6880 PPC_NONE, PPC2_TM),
6881 GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \
6882 PPC_NONE, PPC2_TM),
6883
6884 #include "translate/fp-ops.c.inc"
6885
6886 #include "translate/vmx-ops.c.inc"
6887
6888 #include "translate/vsx-ops.c.inc"
6889
6890 #include "translate/spe-ops.c.inc"
6891 };
6892
6893 /*****************************************************************************/
6894 /* Opcode types */
/*
 * Opcode dispatch table entry types. A table slot holds either a direct
 * handler pointer or a tagged pointer to a nested sub-table; the tag is
 * encoded in the low bits of the pointer value.
 */
enum {
    PPC_DIRECT = 0,   /* Opcode routine */
    PPC_INDIRECT = 1, /* Indirect opcode table */
};

/* Mask covering the tag bits encoded in a table entry pointer. */
#define PPC_OPCODE_MASK 0x3

/* Return non-zero iff @handler is a tagged pointer to a sub-table. */
static inline int is_indirect_opcode(void *handler)
{
    uintptr_t tag = (uintptr_t)handler & PPC_OPCODE_MASK;

    return tag == PPC_INDIRECT;
}
6906
6907 static inline opc_handler_t **ind_table(void *handler)
6908 {
6909 return (opc_handler_t **)((uintptr_t)handler & ~PPC_OPCODE_MASK);
6910 }
6911
/* Instruction dispatch (opcode) table creation */
6914 static void fill_new_table(opc_handler_t **table, int len)
6915 {
6916 int i;
6917
6918 for (i = 0; i < len; i++) {
6919 table[i] = &invalid_handler;
6920 }
6921 }
6922
6923 static int create_new_table(opc_handler_t **table, unsigned char idx)
6924 {
6925 opc_handler_t **tmp;
6926
6927 tmp = g_new(opc_handler_t *, PPC_CPU_INDIRECT_OPCODES_LEN);
6928 fill_new_table(tmp, PPC_CPU_INDIRECT_OPCODES_LEN);
6929 table[idx] = (opc_handler_t *)((uintptr_t)tmp | PPC_INDIRECT);
6930
6931 return 0;
6932 }
6933
6934 static int insert_in_table(opc_handler_t **table, unsigned char idx,
6935 opc_handler_t *handler)
6936 {
6937 if (table[idx] != &invalid_handler) {
6938 return -1;
6939 }
6940 table[idx] = handler;
6941
6942 return 0;
6943 }
6944
6945 static int register_direct_insn(opc_handler_t **ppc_opcodes,
6946 unsigned char idx, opc_handler_t *handler)
6947 {
6948 if (insert_in_table(ppc_opcodes, idx, handler) < 0) {
6949 printf("*** ERROR: opcode %02x already assigned in main "
6950 "opcode table\n", idx);
6951 return -1;
6952 }
6953
6954 return 0;
6955 }
6956
6957 static int register_ind_in_table(opc_handler_t **table,
6958 unsigned char idx1, unsigned char idx2,
6959 opc_handler_t *handler)
6960 {
6961 if (table[idx1] == &invalid_handler) {
6962 if (create_new_table(table, idx1) < 0) {
6963 printf("*** ERROR: unable to create indirect table "
6964 "idx=%02x\n", idx1);
6965 return -1;
6966 }
6967 } else {
6968 if (!is_indirect_opcode(table[idx1])) {
6969 printf("*** ERROR: idx %02x already assigned to a direct "
6970 "opcode\n", idx1);
6971 return -1;
6972 }
6973 }
6974 if (handler != NULL &&
6975 insert_in_table(ind_table(table[idx1]), idx2, handler) < 0) {
6976 printf("*** ERROR: opcode %02x already assigned in "
6977 "opcode table %02x\n", idx2, idx1);
6978 return -1;
6979 }
6980
6981 return 0;
6982 }
6983
/*
 * Register a two-level opcode handler: @idx1 selects the sub-table in
 * the main opcode table, @idx2 the slot inside it. Thin wrapper kept
 * for symmetry with register_dblind_insn()/register_trplind_insn().
 */
static int register_ind_insn(opc_handler_t **ppc_opcodes,
                             unsigned char idx1, unsigned char idx2,
                             opc_handler_t *handler)
{
    return register_ind_in_table(ppc_opcodes, idx1, idx2, handler);
}
6990
6991 static int register_dblind_insn(opc_handler_t **ppc_opcodes,
6992 unsigned char idx1, unsigned char idx2,
6993 unsigned char idx3, opc_handler_t *handler)
6994 {
6995 if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
6996 printf("*** ERROR: unable to join indirect table idx "
6997 "[%02x-%02x]\n", idx1, idx2);
6998 return -1;
6999 }
7000 if (register_ind_in_table(ind_table(ppc_opcodes[idx1]), idx2, idx3,
7001 handler) < 0) {
7002 printf("*** ERROR: unable to insert opcode "
7003 "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
7004 return -1;
7005 }
7006
7007 return 0;
7008 }
7009
7010 static int register_trplind_insn(opc_handler_t **ppc_opcodes,
7011 unsigned char idx1, unsigned char idx2,
7012 unsigned char idx3, unsigned char idx4,
7013 opc_handler_t *handler)
7014 {
7015 opc_handler_t **table;
7016
7017 if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
7018 printf("*** ERROR: unable to join indirect table idx "
7019 "[%02x-%02x]\n", idx1, idx2);
7020 return -1;
7021 }
7022 table = ind_table(ppc_opcodes[idx1]);
7023 if (register_ind_in_table(table, idx2, idx3, NULL) < 0) {
7024 printf("*** ERROR: unable to join 2nd-level indirect table idx "
7025 "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
7026 return -1;
7027 }
7028 table = ind_table(table[idx2]);
7029 if (register_ind_in_table(table, idx3, idx4, handler) < 0) {
7030 printf("*** ERROR: unable to insert opcode "
7031 "[%02x-%02x-%02x-%02x]\n", idx1, idx2, idx3, idx4);
7032 return -1;
7033 }
7034 return 0;
7035 }
7036 static int register_insn(opc_handler_t **ppc_opcodes, opcode_t *insn)
7037 {
7038 if (insn->opc2 != 0xFF) {
7039 if (insn->opc3 != 0xFF) {
7040 if (insn->opc4 != 0xFF) {
7041 if (register_trplind_insn(ppc_opcodes, insn->opc1, insn->opc2,
7042 insn->opc3, insn->opc4,
7043 &insn->handler) < 0) {
7044 return -1;
7045 }
7046 } else {
7047 if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2,
7048 insn->opc3, &insn->handler) < 0) {
7049 return -1;
7050 }
7051 }
7052 } else {
7053 if (register_ind_insn(ppc_opcodes, insn->opc1,
7054 insn->opc2, &insn->handler) < 0) {
7055 return -1;
7056 }
7057 }
7058 } else {
7059 if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) {
7060 return -1;
7061 }
7062 }
7063
7064 return 0;
7065 }
7066
7067 static int test_opcode_table(opc_handler_t **table, int len)
7068 {
7069 int i, count, tmp;
7070
7071 for (i = 0, count = 0; i < len; i++) {
7072 /* Consistency fixup */
7073 if (table[i] == NULL) {
7074 table[i] = &invalid_handler;
7075 }
7076 if (table[i] != &invalid_handler) {
7077 if (is_indirect_opcode(table[i])) {
7078 tmp = test_opcode_table(ind_table(table[i]),
7079 PPC_CPU_INDIRECT_OPCODES_LEN);
7080 if (tmp == 0) {
7081 free(table[i]);
7082 table[i] = &invalid_handler;
7083 } else {
7084 count++;
7085 }
7086 } else {
7087 count++;
7088 }
7089 }
7090 }
7091
7092 return count;
7093 }
7094
7095 static void fix_opcode_tables(opc_handler_t **ppc_opcodes)
7096 {
7097 if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) {
7098 printf("*** WARNING: no opcode defined !\n");
7099 }
7100 }
7101
7102 /*****************************************************************************/
7103 void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp)
7104 {
7105 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
7106 opcode_t *opc;
7107
7108 fill_new_table(cpu->opcodes, PPC_CPU_OPCODES_LEN);
7109 for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) {
7110 if (((opc->handler.type & pcc->insns_flags) != 0) ||
7111 ((opc->handler.type2 & pcc->insns_flags2) != 0)) {
7112 if (register_insn(cpu->opcodes, opc) < 0) {
7113 error_setg(errp, "ERROR initializing PowerPC instruction "
7114 "0x%02x 0x%02x 0x%02x", opc->opc1, opc->opc2,
7115 opc->opc3);
7116 return;
7117 }
7118 }
7119 }
7120 fix_opcode_tables(cpu->opcodes);
7121 fflush(stdout);
7122 fflush(stderr);
7123 }
7124
7125 void destroy_ppc_opcodes(PowerPCCPU *cpu)
7126 {
7127 opc_handler_t **table, **table_2;
7128 int i, j, k;
7129
7130 for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) {
7131 if (cpu->opcodes[i] == &invalid_handler) {
7132 continue;
7133 }
7134 if (is_indirect_opcode(cpu->opcodes[i])) {
7135 table = ind_table(cpu->opcodes[i]);
7136 for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) {
7137 if (table[j] == &invalid_handler) {
7138 continue;
7139 }
7140 if (is_indirect_opcode(table[j])) {
7141 table_2 = ind_table(table[j]);
7142 for (k = 0; k < PPC_CPU_INDIRECT_OPCODES_LEN; k++) {
7143 if (table_2[k] != &invalid_handler &&
7144 is_indirect_opcode(table_2[k])) {
7145 g_free((opc_handler_t *)((uintptr_t)table_2[k] &
7146 ~PPC_INDIRECT));
7147 }
7148 }
7149 g_free((opc_handler_t *)((uintptr_t)table[j] &
7150 ~PPC_INDIRECT));
7151 }
7152 }
7153 g_free((opc_handler_t *)((uintptr_t)cpu->opcodes[i] &
7154 ~PPC_INDIRECT));
7155 }
7156 }
7157 }
7158
7159 int ppc_fixup_cpu(PowerPCCPU *cpu)
7160 {
7161 CPUPPCState *env = &cpu->env;
7162
7163 /*
7164 * TCG doesn't (yet) emulate some groups of instructions that are
7165 * implemented on some otherwise supported CPUs (e.g. VSX and
7166 * decimal floating point instructions on POWER7). We remove
7167 * unsupported instruction groups from the cpu state's instruction
7168 * masks and hope the guest can cope. For at least the pseries
7169 * machine, the unavailability of these instructions can be
7170 * advertised to the guest via the device tree.
7171 */
7172 if ((env->insns_flags & ~PPC_TCG_INSNS)
7173 || (env->insns_flags2 & ~PPC_TCG_INSNS2)) {
7174 warn_report("Disabling some instructions which are not "
7175 "emulated by TCG (0x%" PRIx64 ", 0x%" PRIx64 ")",
7176 env->insns_flags & ~PPC_TCG_INSNS,
7177 env->insns_flags2 & ~PPC_TCG_INSNS2);
7178 }
7179 env->insns_flags &= PPC_TCG_INSNS;
7180 env->insns_flags2 &= PPC_TCG_INSNS2;
7181 return 0;
7182 }
7183
/*
 * Decode and emit code for one instruction via the legacy opcode tables
 * (cpu->opcodes). Returns true when a handler was found and invoked,
 * false when the opcode is invalid/unsupported or has bits set that the
 * handler declares as reserved.
 */
static bool decode_legacy(PowerPCCPU *cpu, DisasContext *ctx, uint32_t insn)
{
    opc_handler_t **table, *handler;
    uint32_t inval;

    ctx->opcode = insn;

    LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n",
              insn, opc1(insn), opc2(insn), opc3(insn), opc4(insn),
              ctx->le_mode ? "little" : "big");

    /* Walk up to four levels of (tagged) indirect tables. */
    table = cpu->opcodes;
    handler = table[opc1(insn)];
    if (is_indirect_opcode(handler)) {
        table = ind_table(handler);
        handler = table[opc2(insn)];
        if (is_indirect_opcode(handler)) {
            table = ind_table(handler);
            handler = table[opc3(insn)];
            if (is_indirect_opcode(handler)) {
                table = ind_table(handler);
                handler = table[opc4(insn)];
            }
        }
    }

    /* Is opcode *REALLY* valid ? */
    if (unlikely(handler->handler == &gen_invalid)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: "
                      "%02x - %02x - %02x - %02x (%08x) "
                      TARGET_FMT_lx "\n",
                      opc1(insn), opc2(insn), opc3(insn), opc4(insn),
                      insn, ctx->cia);
        return false;
    }

    /*
     * SPE-family instructions with Rc set use the secondary invalid-bits
     * mask; everything else uses the primary one.
     */
    if (unlikely(handler->type & (PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE)
                 && Rc(insn))) {
        inval = handler->inval2;
    } else {
        inval = handler->inval1;
    }

    /* Reject encodings with reserved bits set. */
    if (unlikely((insn & inval) != 0)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: "
                      "%02x - %02x - %02x - %02x (%08x) "
                      TARGET_FMT_lx "\n", insn & inval,
                      opc1(insn), opc2(insn), opc3(insn), opc4(insn),
                      insn, ctx->cia);
        return false;
    }

    handler->handler(ctx);
    return true;
}
7239
/*
 * TranslatorOps hook: initialise the DisasContext from the TB's hflags
 * word and the CPU state before translating a new translation block.
 */
static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUPPCState *env = cs->env_ptr;
    uint32_t hflags = ctx->base.tb->flags;

    /* Privilege / MMU / byte-order state, unpacked bit-by-bit from hflags. */
    ctx->spr_cb = env->spr_cb;
    ctx->pr = (hflags >> HFLAGS_PR) & 1;
    ctx->mem_idx = (hflags >> HFLAGS_DMMU_IDX) & 7;
    ctx->dr = (hflags >> HFLAGS_DR) & 1;
    ctx->hv = (hflags >> HFLAGS_HV) & 1;
    ctx->insns_flags = env->insns_flags;
    ctx->insns_flags2 = env->insns_flags2;
    ctx->access_type = -1;
    ctx->need_access_type = !mmu_is_64bit(env->mmu_model);
    ctx->le_mode = (hflags >> HFLAGS_LE) & 1;
    ctx->default_tcg_memop_mask = ctx->le_mode ? MO_LE : MO_BE;
    ctx->flags = env->flags;
#if defined(TARGET_PPC64)
    ctx->sf_mode = (hflags >> HFLAGS_64) & 1;
    ctx->has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
#endif
    ctx->lazy_tlb_flush = env->mmu_model == POWERPC_MMU_32B
        || env->mmu_model & POWERPC_MMU_64;

    /* Facility-enable bits (FP, SPE, VMX, VSX, TM, ...). */
    ctx->fpu_enabled = (hflags >> HFLAGS_FP) & 1;
    ctx->spe_enabled = (hflags >> HFLAGS_SPE) & 1;
    ctx->altivec_enabled = (hflags >> HFLAGS_VR) & 1;
    ctx->vsx_enabled = (hflags >> HFLAGS_VSX) & 1;
    ctx->tm_enabled = (hflags >> HFLAGS_TM) & 1;
    ctx->gtse = (hflags >> HFLAGS_GTSE) & 1;
    ctx->hr = (hflags >> HFLAGS_HR) & 1;
    /* PMU-related state used by power8-pmu instruction counting. */
    ctx->mmcr0_pmcc0 = (hflags >> HFLAGS_PMCC0) & 1;
    ctx->mmcr0_pmcc1 = (hflags >> HFLAGS_PMCC1) & 1;
    ctx->mmcr0_pmcjce = (hflags >> HFLAGS_PMCJCE) & 1;
    ctx->pmc_other = (hflags >> HFLAGS_PMC_OTHER) & 1;
    ctx->pmu_insn_cnt = (hflags >> HFLAGS_INSN_CNT) & 1;

    /* Single-stepping forces one instruction per TB. */
    ctx->singlestep_enabled = 0;
    if ((hflags >> HFLAGS_SE) & 1) {
        ctx->singlestep_enabled |= CPU_SINGLE_STEP;
        ctx->base.max_insns = 1;
    }
    if ((hflags >> HFLAGS_BE) & 1) {
        ctx->singlestep_enabled |= CPU_BRANCH_STEP;
    }
}
7287
/* TranslatorOps hook: nothing to do at TB start for PPC. */
static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
7291
/* TranslatorOps hook: record the guest PC at the start of each insn. */
static void ppc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
7296
/*
 * True when @insn is the prefix word of an 8-byte prefixed instruction
 * (primary opcode 1, introduced in ISA v3.1).
 * NOTE(review): REQUIRE_INSNS_FLAGS2 is assumed to return false from
 * this function when the CPU lacks ISA310 support — confirm against the
 * macro's definition.
 */
static bool is_prefix_insn(DisasContext *ctx, uint32_t insn)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    return opc1(insn) == 1;
}
7302
/*
 * TranslatorOps hook: fetch, decode and translate a single instruction
 * (4 bytes, or 8 bytes for an ISA v3.1 prefixed instruction).
 */
static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = cs->env_ptr;
    target_ulong pc;
    uint32_t insn;
    bool ok;

    LOG_DISAS("----------------\n");
    LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
              ctx->base.pc_next, ctx->mem_idx, (int)msr_ir);

    /* ctx->cia records the address of the current instruction. */
    ctx->cia = pc = ctx->base.pc_next;
    insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx));
    ctx->base.pc_next = pc += 4;

    if (!is_prefix_insn(ctx, insn)) {
        /* Plain 4-byte insn: try decode_insn32 first, then fall back to
         * the legacy opcode tables. */
        ok = (decode_insn32(ctx, insn) ||
              decode_legacy(cpu, ctx, insn));
    } else if ((pc & 63) == 0) {
        /*
         * Power v3.1, section 1.9 Exceptions:
         * attempt to execute a prefixed instruction that crosses a
         * 64-byte address boundary (system alignment error).
         */
        gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_INSN);
        ok = true;
    } else {
        /* Prefixed insn: fetch the suffix word and decode all 64 bits. */
        uint32_t insn2 = translator_ldl_swap(env, dcbase, pc,
                                             need_byteswap(ctx));
        ctx->base.pc_next = pc += 4;
        ok = decode_insn64(ctx, deposit64(insn2, 32, 32, insn));
    }
    if (!ok) {
        gen_invalid(ctx);
    }

    /* End the TB when crossing a page boundary. */
    if (ctx->base.is_jmp == DISAS_NEXT && !(pc & ~TARGET_PAGE_MASK)) {
        ctx->base.is_jmp = DISAS_TOO_MANY;
    }
}
7346
7347 static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
7348 {
7349 DisasContext *ctx = container_of(dcbase, DisasContext, base);
7350 DisasJumpType is_jmp = ctx->base.is_jmp;
7351 target_ulong nip = ctx->base.pc_next;
7352
7353 if (is_jmp == DISAS_NORETURN) {
7354 /* We have already exited the TB. */
7355 return;
7356 }
7357
7358 /* Honor single stepping. */
7359 if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP)
7360 && (nip <= 0x100 || nip > 0xf00)) {
7361 switch (is_jmp) {
7362 case DISAS_TOO_MANY:
7363 case DISAS_EXIT_UPDATE:
7364 case DISAS_CHAIN_UPDATE:
7365 gen_update_nip(ctx, nip);
7366 break;
7367 case DISAS_EXIT:
7368 case DISAS_CHAIN:
7369 break;
7370 default:
7371 g_assert_not_reached();
7372 }
7373
7374 gen_debug_exception(ctx);
7375 return;
7376 }
7377
7378 switch (is_jmp) {
7379 case DISAS_TOO_MANY:
7380 if (use_goto_tb(ctx, nip)) {
7381 pmu_count_insns(ctx);
7382 tcg_gen_goto_tb(0);
7383 gen_update_nip(ctx, nip);
7384 tcg_gen_exit_tb(ctx->base.tb, 0);
7385 break;
7386 }
7387 /* fall through */
7388 case DISAS_CHAIN_UPDATE:
7389 gen_update_nip(ctx, nip);
7390 /* fall through */
7391 case DISAS_CHAIN:
7392 /*
7393 * tcg_gen_lookup_and_goto_ptr will exit the TB if
7394 * CF_NO_GOTO_PTR is set. Count insns now.
7395 */
7396 if (ctx->base.tb->flags & CF_NO_GOTO_PTR) {
7397 pmu_count_insns(ctx);
7398 }
7399
7400 tcg_gen_lookup_and_goto_ptr();
7401 break;
7402
7403 case DISAS_EXIT_UPDATE:
7404 gen_update_nip(ctx, nip);
7405 /* fall through */
7406 case DISAS_EXIT:
7407 pmu_count_insns(ctx);
7408 tcg_gen_exit_tb(NULL, 0);
7409 break;
7410
7411 default:
7412 g_assert_not_reached();
7413 }
7414 }
7415
/* TranslatorOps hook: dump the guest disassembly of the TB to @logfile. */
static void ppc_tr_disas_log(const DisasContextBase *dcbase,
                             CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
}
7422
/* Hooks driving the generic translator loop for PowerPC. */
static const TranslatorOps ppc_tr_ops = {
    .init_disas_context = ppc_tr_init_disas_context,
    .tb_start           = ppc_tr_tb_start,
    .insn_start         = ppc_tr_insn_start,
    .translate_insn     = ppc_tr_translate_insn,
    .tb_stop            = ppc_tr_tb_stop,
    .disas_log          = ppc_tr_disas_log,
};
7431
/* Entry point for TB translation: run the generic translator loop. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;

    translator_loop(cs, tb, max_insns, pc, host_pc, &ppc_tr_ops, &ctx.base);
}