]> git.proxmox.com Git - mirror_qemu.git/blob - target/ppc/translate.c
Merge tag 'for-upstream' of https://gitlab.com/bonzini/qemu into staging
[mirror_qemu.git] / target / ppc / translate.c
1 /*
2 * PowerPC emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 * Copyright (C) 2011 Freescale Semiconductor, Inc.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "internal.h"
24 #include "disas/disas.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "qemu/host-utils.h"
29 #include "qemu/main-loop.h"
30 #include "exec/cpu_ldst.h"
31
32 #include "exec/helper-proto.h"
33 #include "exec/helper-gen.h"
34
35 #include "exec/translator.h"
36 #include "exec/log.h"
37 #include "qemu/atomic128.h"
38 #include "spr_common.h"
39 #include "power8-pmu.h"
40
41 #include "qemu/qemu-print.h"
42 #include "qapi/error.h"
43
44 #define HELPER_H "helper.h"
45 #include "exec/helper-info.c.inc"
46 #undef HELPER_H
47
/* Bits for DisasContext::singlestep_enabled (see gen_prep_dbgex) */
#define CPU_SINGLE_STEP 0x1
#define CPU_BRANCH_STEP 0x2

/* Include definitions for instructions classes and implementations flags */
/* #define PPC_DEBUG_DISAS */

#ifdef PPC_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
/*****************************************************************************/
/* Code translation helpers */

/*
 * TCG globals backing the guest-visible register file.  These are
 * created once in ppc_translate_init() and referenced by every
 * translated TB.  cpu_reg_names is the backing store for the register
 * name strings handed to tcg_global_mem_new*(); the sizes below count
 * each name including its trailing NUL ("r0".."r9" = 3 bytes,
 * "r10".."r31" = 4 bytes, etc.).
 */
static char cpu_reg_names[10 * 3 + 22 * 4 /* GPR */
                          + 10 * 4 + 22 * 5 /* SPE GPRh */
                          + 8 * 5 /* CRF */];
static TCGv cpu_gpr[32];        /* general purpose registers */
static TCGv cpu_gprh[32];       /* SPE upper halves of the GPRs */
static TCGv_i32 cpu_crf[8];     /* condition register fields */
static TCGv cpu_nip;            /* next instruction pointer */
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
#if defined(TARGET_PPC64)
static TCGv cpu_cfar;
#endif
/* XER is kept split: SO/OV/CA(+32-bit variants) live in their own globals */
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
static TCGv cpu_reserve;        /* lwarx/stwcx. reservation address */
static TCGv cpu_reserve_length;
static TCGv cpu_reserve_val;
static TCGv cpu_reserve_val2;
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;
83
/*
 * Create all TCG globals used by the PPC front end, mapping each one
 * onto its CPUPPCState field.  Called once at accel init time.
 *
 * The register names must outlive translation, so they are formatted
 * into the static cpu_reg_names buffer; the hard-coded pointer bumps
 * below are the formatted lengths *including* the NUL terminator
 * ("crf0" -> 5, "r9" -> 3, "r10" -> 4, "r9H" -> 4, "r10H" -> 5) and
 * must stay in sync with the sizeof() expression on cpu_reg_names.
 */
void ppc_translate_init(void)
{
    int i;
    char *p;
    size_t cpu_reg_names_size;

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    /* Condition register fields: "crf0".."crf7" */
    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    /* GPRs and their SPE high halves: "r0"/"r0H" .. "r31"/"r31H" */
    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;
    }

    cpu_nip = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    /* XER plus its individually-tracked SO/OV/CA (and ISA 3.0 *32) bits */
    cpu_xer = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ca), "CA");
    cpu_ov32 = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, ov32), "OV32");
    cpu_ca32 = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, ca32), "CA32");

    /* Load-reserve/store-conditional bookkeeping */
    cpu_reserve = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");
    cpu_reserve_length = tcg_global_mem_new(cpu_env,
                                            offsetof(CPUPPCState,
                                                     reserve_length),
                                            "reserve_length");
    cpu_reserve_val = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, reserve_val),
                                         "reserve_val");
    cpu_reserve_val2 = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUPPCState, reserve_val2),
                                          "reserve_val2");

    cpu_fpscr = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPUPPCState, access_type),
                                             "access_type");
}
165
/* internal defines */

/* Per-TB translation state for the PPC front end. */
struct DisasContext {
    DisasContextBase base;
    target_ulong cia;                    /* current instruction address */
    uint32_t opcode;                     /* raw opcode being translated */
    /* Routine used to access memory */
    bool pr, hv, dr, le_mode;            /* cached MSR state: problem state,
                                            hypervisor, data relocation,
                                            little-endian */
    bool lazy_tlb_flush;
    bool need_access_type;               /* CPU model tracks env->access_type */
    int mem_idx;                         /* MMU index for memory ops */
    int access_type;                     /* last access type emitted, to
                                            avoid redundant stores */
    /* Translation flags */
    MemOp default_tcg_memop_mask;        /* byte-swap mask for scalar memops */
#if defined(TARGET_PPC64)
    bool sf_mode;                        /* 64-bit (SF) mode active */
    bool has_cfar;
#endif
    bool fpu_enabled;
    bool altivec_enabled;
    bool vsx_enabled;
    bool spe_enabled;
    bool tm_enabled;
    bool gtse;
    bool hr;
    bool mmcr0_pmcc0;                    /* PMU access-control state */
    bool mmcr0_pmcc1;
    bool mmcr0_pmcjce;
    bool pmc_other;
    bool pmu_insn_cnt;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;              /* CPU_SINGLE_STEP / CPU_BRANCH_STEP */
    uint32_t flags;                      /* POWERPC_FLAG_* for this CPU */
    uint64_t insns_flags;                /* supported instruction classes */
    uint64_t insns_flags2;
};
201
/* Front-end-specific is_jmp states (continuations of DISAS_TARGET_*) */
#define DISAS_EXIT         DISAS_TARGET_0  /* exit to main loop, pc updated */
#define DISAS_EXIT_UPDATE  DISAS_TARGET_1  /* exit to main loop, pc stale */
#define DISAS_CHAIN        DISAS_TARGET_2  /* lookup next tb, pc updated */
#define DISAS_CHAIN_UPDATE DISAS_TARGET_3  /* lookup next tb, pc stale */

/* Return true iff byteswap is needed in a scalar memop */
static inline bool need_byteswap(const DisasContext *ctx)
{
#if TARGET_BIG_ENDIAN
    return ctx->le_mode;
#else
    return !ctx->le_mode;
#endif
}

/* True when active word size < size of target_long. */
#ifdef TARGET_PPC64
# define NARROW_MODE(C)  (!(C)->sf_mode)
#else
# define NARROW_MODE(C)  0
#endif
223
/*
 * Legacy (non-decodetree) opcode table entry: reserved-bit masks,
 * instruction-class flags, and the translator callback.
 */
struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
};
236
237 static inline bool gen_serialize(DisasContext *ctx)
238 {
239 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
240 /* Restart with exclusive lock. */
241 gen_helper_exit_atomic(cpu_env);
242 ctx->base.is_jmp = DISAS_NORETURN;
243 return false;
244 }
245 return true;
246 }
247
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/*
 * Like gen_serialize(), but only when this CPU actually has SMT
 * siblings; a single-threaded core needs no exclusivity.
 */
static inline bool gen_serialize_core(DisasContext *ctx)
{
    if (!(ctx->flags & POWERPC_FLAG_SMT)) {
        return true;
    }
    return gen_serialize(ctx);
}
#endif
258
/* SPR load/store helpers */

/* Load SPR 'reg' from env->spr[] into TCG value t. */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

/* Store TCG value t into env->spr[reg]. */
static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}
269
270 static inline void gen_set_access_type(DisasContext *ctx, int access_type)
271 {
272 if (ctx->need_access_type && ctx->access_type != access_type) {
273 tcg_gen_movi_i32(cpu_access_type, access_type);
274 ctx->access_type = access_type;
275 }
276 }
277
278 static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
279 {
280 if (NARROW_MODE(ctx)) {
281 nip = (uint32_t)nip;
282 }
283 tcg_gen_movi_tl(cpu_nip, nip);
284 }
285
286 static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
287 {
288 TCGv_i32 t0, t1;
289
290 /*
291 * These are all synchronous exceptions, we set the PC back to the
292 * faulting instruction
293 */
294 gen_update_nip(ctx, ctx->cia);
295 t0 = tcg_constant_i32(excp);
296 t1 = tcg_constant_i32(error);
297 gen_helper_raise_exception_err(cpu_env, t0, t1);
298 ctx->base.is_jmp = DISAS_NORETURN;
299 }
300
301 static void gen_exception(DisasContext *ctx, uint32_t excp)
302 {
303 TCGv_i32 t0;
304
305 /*
306 * These are all synchronous exceptions, we set the PC back to the
307 * faulting instruction
308 */
309 gen_update_nip(ctx, ctx->cia);
310 t0 = tcg_constant_i32(excp);
311 gen_helper_raise_exception(cpu_env, t0);
312 ctx->base.is_jmp = DISAS_NORETURN;
313 }
314
315 static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
316 target_ulong nip)
317 {
318 TCGv_i32 t0;
319
320 gen_update_nip(ctx, nip);
321 t0 = tcg_constant_i32(excp);
322 gen_helper_raise_exception(cpu_env, t0);
323 ctx->base.is_jmp = DISAS_NORETURN;
324 }
325
#if !defined(CONFIG_USER_ONLY)
/* Re-evaluate pending interrupts after state that can unmask them changes. */
static void gen_ppc_maybe_interrupt(DisasContext *ctx)
{
    translator_io_start(&ctx->base);
    gen_helper_ppc_maybe_interrupt(cpu_env);
}
#endif
333
334 /*
335 * Tells the caller what is the appropriate exception to generate and prepares
336 * SPR registers for this exception.
337 *
338 * The exception can be either POWERPC_EXCP_TRACE (on most PowerPCs) or
339 * POWERPC_EXCP_DEBUG (on BookE).
340 */
341 static uint32_t gen_prep_dbgex(DisasContext *ctx)
342 {
343 if (ctx->flags & POWERPC_FLAG_DE) {
344 target_ulong dbsr = 0;
345 if (ctx->singlestep_enabled & CPU_SINGLE_STEP) {
346 dbsr = DBCR0_ICMP;
347 } else {
348 /* Must have been branch */
349 dbsr = DBCR0_BRT;
350 }
351 TCGv t0 = tcg_temp_new();
352 gen_load_spr(t0, SPR_BOOKE_DBSR);
353 tcg_gen_ori_tl(t0, t0, dbsr);
354 gen_store_spr(SPR_BOOKE_DBSR, t0);
355 return POWERPC_EXCP_DEBUG;
356 } else {
357 return POWERPC_EXCP_TRACE;
358 }
359 }
360
361 static void gen_debug_exception(DisasContext *ctx)
362 {
363 gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
364 ctx->base.is_jmp = DISAS_NORETURN;
365 }
366
/* Illegal-instruction: raised as HV_EMU so it can be emulated or demoted. */
static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error);
}

/* Privilege violation: always a program check. */
static inline void gen_priv_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error);
}

/* Hypervisor privilege violation. */
static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error);
}
383
/*****************************************************************************/
/* SPR READ/WRITE CALLBACKS */

/* Callback for SPRs with no TCG access: silently ignore the access. */
void spr_noaccess(DisasContext *ctx, int gprn, int sprn)
{
#if 0
    sprn = ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
    printf("ERROR: try to access SPR %d !\n", sprn);
#endif
}
394
/* #define PPC_DUMP_SPR_ACCESSES */

/*
 * Generic callbacks:
 * do nothing but store/retrieve spr value
 */

/* Debug hook: trace an SPR load when PPC_DUMP_SPR_ACCESSES is defined. */
static void spr_load_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_load_dump_spr(cpu_env, t0);
#endif
}

/* mfspr: copy env->spr[sprn] into the destination GPR. */
void spr_read_generic(DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], sprn);
    spr_load_dump_spr(sprn);
}

/* Debug hook: trace an SPR store when PPC_DUMP_SPR_ACCESSES is defined. */
static void spr_store_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_store_dump_spr(cpu_env, t0);
#endif
}

/* mtspr: copy the source GPR into env->spr[sprn]. */
void spr_write_generic(DisasContext *ctx, int sprn, int gprn)
{
    gen_store_spr(sprn, cpu_gpr[gprn]);
    spr_store_dump_spr(sprn);
}
428
/* mtspr for 32-bit SPRs: on 64-bit targets only the low word is kept. */
void spr_write_generic32(DisasContext *ctx, int sprn, int gprn)
{
#ifdef TARGET_PPC64
    TCGv t0 = tcg_temp_new();
    tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]);
    gen_store_spr(sprn, t0);
    spr_store_dump_spr(sprn);
#else
    /* target_long is already 32-bit, plain store suffices */
    spr_write_generic(ctx, sprn, gprn);
#endif
}
440
/* Single-thread CTRL write: mirror the RUN bit (bit 0) into TS (bit 8). */
static void spr_write_CTRL_ST(DisasContext *ctx, int sprn, int gprn)
{
    /* This does not implement >1 thread */
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[gprn], 0, 1); /* Extract RUN field */
    tcg_gen_shli_tl(t1, t0, 8); /* Duplicate the bit in TS */
    tcg_gen_or_tl(t1, t1, t0);
    gen_store_spr(sprn, t1);
}
451
/*
 * CTRL write callback.  On SMT cores CTRL is shared between threads,
 * so the write goes through a helper under serialized execution;
 * single-threaded cores take the cheap inline path.
 */
void spr_write_CTRL(DisasContext *ctx, int sprn, int gprn)
{
    if (!(ctx->flags & POWERPC_FLAG_SMT)) {
        spr_write_CTRL_ST(ctx, sprn, gprn);
        goto out;
    }

    if (!gen_serialize(ctx)) {
        /* TB aborted, will re-execute exclusively */
        return;
    }

    gen_helper_spr_write_CTRL(cpu_env, tcg_constant_i32(sprn),
                              cpu_gpr[gprn]);
out:
    spr_store_dump_spr(sprn);

    /*
     * SPR_CTRL writes must force a new translation block,
     * allowing the PMU to calculate the run latch events with
     * more accuracy.
     */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}
475
476 #if !defined(CONFIG_USER_ONLY)
477 void spr_write_clear(DisasContext *ctx, int sprn, int gprn)
478 {
479 TCGv t0 = tcg_temp_new();
480 TCGv t1 = tcg_temp_new();
481 gen_load_spr(t0, sprn);
482 tcg_gen_neg_tl(t1, cpu_gpr[gprn]);
483 tcg_gen_and_tl(t0, t0, t1);
484 gen_store_spr(sprn, t0);
485 }
486
/* Architecturally legal access that has no effect under TCG: ignore it. */
void spr_access_nop(DisasContext *ctx, int sprn, int gprn)
{
}
490
491 #endif
492
493 /* SPR common to all PowerPC */
494 /* XER */
495 void spr_read_xer(DisasContext *ctx, int gprn, int sprn)
496 {
497 TCGv dst = cpu_gpr[gprn];
498 TCGv t0 = tcg_temp_new();
499 TCGv t1 = tcg_temp_new();
500 TCGv t2 = tcg_temp_new();
501 tcg_gen_mov_tl(dst, cpu_xer);
502 tcg_gen_shli_tl(t0, cpu_so, XER_SO);
503 tcg_gen_shli_tl(t1, cpu_ov, XER_OV);
504 tcg_gen_shli_tl(t2, cpu_ca, XER_CA);
505 tcg_gen_or_tl(t0, t0, t1);
506 tcg_gen_or_tl(dst, dst, t2);
507 tcg_gen_or_tl(dst, dst, t0);
508 if (is_isa300(ctx)) {
509 tcg_gen_shli_tl(t0, cpu_ov32, XER_OV32);
510 tcg_gen_or_tl(dst, dst, t0);
511 tcg_gen_shli_tl(t0, cpu_ca32, XER_CA32);
512 tcg_gen_or_tl(dst, dst, t0);
513 }
514 }
515
/*
 * mtxer: scatter the value back into the split representation — the
 * flag bits go to their dedicated globals and are masked out of
 * cpu_xer itself.
 */
void spr_write_xer(DisasContext *ctx, int sprn, int gprn)
{
    TCGv src = cpu_gpr[gprn];
    /* Write all flags, while reading back check for isa300 */
    tcg_gen_andi_tl(cpu_xer, src,
                    ~((1u << XER_SO) |
                      (1u << XER_OV) | (1u << XER_OV32) |
                      (1u << XER_CA) | (1u << XER_CA32)));
    tcg_gen_extract_tl(cpu_ov32, src, XER_OV32, 1);
    tcg_gen_extract_tl(cpu_ca32, src, XER_CA32, 1);
    tcg_gen_extract_tl(cpu_so, src, XER_SO, 1);
    tcg_gen_extract_tl(cpu_ov, src, XER_OV, 1);
    tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1);
}
530
/* LR */
void spr_read_lr(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_lr);
}

void spr_write_lr(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_lr, cpu_gpr[gprn]);
}
541
/* CFAR (Come-From Address Register) */
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
void spr_read_cfar(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_cfar);
}

void spr_write_cfar(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_cfar, cpu_gpr[gprn]);
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
554
/* CTR */
void spr_read_ctr(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_ctr);
}

void spr_write_ctr(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_ctr, cpu_gpr[gprn]);
}
565
/* User read access to SPR */
/* USPRx */
/* UMMCRx */
/* UPMCx */
/* USIA */
/* UDECR */
/* User-mode aliases live 0x10 below their privileged counterparts. */
void spr_read_ureg(DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], sprn + 0x10);
}

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
void spr_write_ureg(DisasContext *ctx, int sprn, int gprn)
{
    gen_store_spr(sprn + 0x10, cpu_gpr[gprn]);
}
#endif
583
/* SPR common to all non-embedded PowerPC */
/* DECR */
#if !defined(CONFIG_USER_ONLY)
/* NOTE: decrementer helpers touch the clock, hence translator_io_start(). */
void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_decr(cpu_gpr[gprn], cpu_env);
}

void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_decr(cpu_env, cpu_gpr[gprn]);
}
#endif
599
/* SPR common to all non-embedded PowerPC, except 601 */
/* Time base */
void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_tbl(cpu_gpr[gprn], cpu_env);
}

void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_tbu(cpu_gpr[gprn], cpu_env);
}

/* Alternate time base (no icount interaction in these helpers) */
void spr_read_atbl(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_atbl(cpu_gpr[gprn], cpu_env);
}

void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_atbu(cpu_gpr[gprn], cpu_env);
}
623
#if !defined(CONFIG_USER_ONLY)
/* Time base writes (system emulation only) */
void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]);
}

void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]);
}

void spr_write_atbl(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]);
}

void spr_write_atbu(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]);
}
646
#if defined(TARGET_PPC64)
/* PURR (Processor Utilization of Resources Register) */
void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_purr(cpu_gpr[gprn], cpu_env);
}

void spr_write_purr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_purr(cpu_env, cpu_gpr[gprn]);
}

/* HDECR */
void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env);
}

void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]);
}

/* VTB (Virtual Time Base) */
void spr_read_vtb(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_vtb(cpu_gpr[gprn], cpu_env);
}

void spr_write_vtb(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_vtb(cpu_env, cpu_gpr[gprn]);
}

/* TBU40: upper 40 bits of the time base */
void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_tbu40(cpu_env, cpu_gpr[gprn]);
}

#endif
#endif
693
#if !defined(CONFIG_USER_ONLY)
/* IBAT0U...IBAT0U */
/* IBAT0L...IBAT7L */
/*
 * BAT SPRs come in upper/lower pairs: the low bit of sprn selects the
 * U/L half and (sprn - base) / 2 selects the BAT index.  The high
 * BATs (4-7) have their own SPR range, hence the "+ 4" variants.
 * Writes go through helpers so the MMU mapping state is updated.
 */
void spr_read_ibat(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
}

void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
}

void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0U) / 2);
    gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
}

void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4U) / 2) + 4);
    gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
}

void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0L) / 2);
    gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
}

void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4L) / 2) + 4);
    gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
}
734
/* DBAT0U...DBAT7U */
/* DBAT0L...DBAT7L */
/* Data BATs: same pairing/indexing scheme as the IBATs above. */
void spr_read_dbat(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
}

void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
}

void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0U) / 2);
    gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
}

void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4U) / 2) + 4);
    gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
}

void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0L) / 2);
    gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
}

void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4L) / 2) + 4);
    gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
}
774
/* SDR1: hashed page table base — goes through a helper to retune the MMU. */
void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_sdr1(cpu_env, cpu_gpr[gprn]);
}
780
#if defined(TARGET_PPC64)
/* 64 bits PowerPC specific SPRs */
/* PIDR */
void spr_write_pidr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_pidr(cpu_env, cpu_gpr[gprn]);
}

void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_lpidr(cpu_env, cpu_gpr[gprn]);
}

/* HIOR: exception prefix, stored in env->excp_prefix */
void spr_read_hior(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, excp_prefix));
}

void spr_write_hior(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    /* Only the implemented prefix bits are writable */
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
}

void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_ptcr(cpu_env, cpu_gpr[gprn]);
}

void spr_write_pcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_pcr(cpu_env, cpu_gpr[gprn]);
}
814
/* DPDES: doorbell state shared across SMT threads, so serialize access. */
void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn)
{
    if (!gen_serialize_core(ctx)) {
        return;
    }

    gen_helper_load_dpdes(cpu_gpr[gprn], cpu_env);
}

void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core(ctx)) {
        return;
    }

    gen_helper_store_dpdes(cpu_env, cpu_gpr[gprn]);
}
#endif
#endif
835
/* PowerPC 40x specific registers */
#if !defined(CONFIG_USER_ONLY)
/* PIT: programmable interval timer (clock-coupled, hence io_start) */
void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_40x_pit(cpu_gpr[gprn], cpu_env);
}

void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_pit(cpu_env, cpu_gpr[gprn]);
}

/* DBCR0: debug control; the helper may trigger a system reset */
void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_store_spr(sprn, cpu_gpr[gprn]);
    gen_helper_store_40x_dbcr0(cpu_env, cpu_gpr[gprn]);
    /* We must stop translation as we may have rebooted */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}

void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_sler(cpu_env, cpu_gpr[gprn]);
}

/* TCR/TSR: 40x timer control and status */
void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_tcr(cpu_env, cpu_gpr[gprn]);
}

void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_tsr(cpu_env, cpu_gpr[gprn]);
}

/* PID: only the low 8 bits are implemented on 40x */
void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xFF);
    gen_helper_store_40x_pid(cpu_env, t0);
}

/* BookE timer control and status */
void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_booke_tcr(cpu_env, cpu_gpr[gprn]);
}

void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_booke_tsr(cpu_env, cpu_gpr[gprn]);
}
#endif
896
/* PIR */
#if !defined(CONFIG_USER_ONLY)
/* Processor ID: only the low 4 bits are writable. */
void spr_write_pir(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xF);
    gen_store_spr(SPR_PIR, t0);
}
#endif
906
/* SPE specific registers */
/* SPEFSCR lives in env as a 32-bit field, so widen/narrow around it. */
void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
    tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0);
}

void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]);
    tcg_gen_st_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
}
921
#if !defined(CONFIG_USER_ONLY)
/* Callback used to write the exception vector base */
void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    /* Only bits allowed by ivpr_mask are kept */
    tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivpr_mask));
    tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
    gen_store_spr(sprn, t0);
}
932
/*
 * Write a BookE IVORn exception vector.  The three IVOR SPR ranges are
 * mapped onto a single excp_vectors[] index space; an sprn outside all
 * ranges raises an invalid-instruction exception.
 */
void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn)
{
    int sprn_offs;

    if (sprn >= SPR_BOOKE_IVOR0 && sprn <= SPR_BOOKE_IVOR15) {
        sprn_offs = sprn - SPR_BOOKE_IVOR0;
    } else if (sprn >= SPR_BOOKE_IVOR32 && sprn <= SPR_BOOKE_IVOR37) {
        sprn_offs = sprn - SPR_BOOKE_IVOR32 + 32;
    } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) {
        sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38;
    } else {
        qemu_log_mask(LOG_GUEST_ERROR, "Trying to write an unknown exception"
                      " vector 0x%03x\n", sprn);
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    /* Mask with ivor_mask, then update both the vector and the raw SPR */
    TCGv t0 = tcg_temp_new();
    tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivor_mask));
    tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_vectors[sprn_offs]));
    gen_store_spr(sprn, t0);
}
#endif
957
#ifdef TARGET_PPC64
#ifndef CONFIG_USER_ONLY
/*
 * AMR/UAMOR/IAMR writes: in non-HV contexts only the bits permitted by
 * the relevant authority mask (UAMOR for problem state, AMOR otherwise)
 * may be modified; all other bits of the target SPR are preserved.
 */
void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 PR=0 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    if (ctx->pr) {
        gen_load_spr(t1, SPR_UAMOR);
    } else {
        gen_load_spr(t1, SPR_AMOR);
    }

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_AMR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_AMR, t0);
    spr_store_dump_spr(SPR_AMR);
}

void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    gen_load_spr(t1, SPR_AMOR);

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_UAMOR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_UAMOR, t0);
    spr_store_dump_spr(SPR_UAMOR);
}

void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    gen_load_spr(t1, SPR_AMOR);

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_IAMR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_IAMR, t0);
    spr_store_dump_spr(SPR_IAMR);
}
#endif
#endif
1046
#ifndef CONFIG_USER_ONLY
/* Thermal registers: refresh them via helper before reading the SPR. */
void spr_read_thrm(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_fixup_thrm(cpu_env);
    gen_load_spr(cpu_gpr[gprn], sprn);
    spr_load_dump_spr(sprn);
}
#endif /* !CONFIG_USER_ONLY */
1055
#if !defined(CONFIG_USER_ONLY)
/* e500 cache control: keep only the writable enable bits. */
void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE);
    gen_store_spr(sprn, t0);
}

void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE);
    gen_store_spr(sprn, t0);
}

/* The flash-invalidate/lock-clear bits self-clear, so mask them out. */
void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_andi_tl(t0, cpu_gpr[gprn],
                    ~(E500_L2CSR0_L2FI | E500_L2CSR0_L2FL | E500_L2CSR0_L2LFC));
    gen_store_spr(sprn, t0);
}

/* MMUCSR0 write triggers a TLB flush in the helper. */
void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke206_tlbflush(cpu_env, cpu_gpr[gprn]);
}

void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]);
}

void spr_write_eplc(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke_set_eplc(cpu_env, cpu_gpr[gprn]);
}

void spr_write_epsc(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke_set_epsc(cpu_env, cpu_gpr[gprn]);
}

#endif
1104
#if !defined(CONFIG_USER_ONLY)
/* MAS7_3: 64-bit alias combining MAS7 (high word) and MAS3 (low word). */
void spr_write_mas73(DisasContext *ctx, int sprn, int gprn)
{
    TCGv val = tcg_temp_new();
    tcg_gen_ext32u_tl(val, cpu_gpr[gprn]);
    gen_store_spr(SPR_BOOKE_MAS3, val);
    tcg_gen_shri_tl(val, cpu_gpr[gprn], 32);
    gen_store_spr(SPR_BOOKE_MAS7, val);
}

void spr_read_mas73(DisasContext *ctx, int gprn, int sprn)
{
    TCGv mas7 = tcg_temp_new();
    TCGv mas3 = tcg_temp_new();
    gen_load_spr(mas7, SPR_BOOKE_MAS7);
    tcg_gen_shli_tl(mas7, mas7, 32);
    gen_load_spr(mas3, SPR_BOOKE_MAS3);
    tcg_gen_or_tl(cpu_gpr[gprn], mas3, mas7);
}

#endif
1126
1127 #ifdef TARGET_PPC64
/*
 * Emit a runtime FSCR facility-availability check; the helper raises a
 * facility-unavailable interrupt if the facility bit is not enabled.
 * Note: facility_sprn is unused here; it is kept for caller symmetry.
 */
static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn,
                                    int bit, int sprn, int cause)
{
    TCGv_i32 t1 = tcg_constant_i32(bit);
    TCGv_i32 t2 = tcg_constant_i32(sprn);
    TCGv_i32 t3 = tcg_constant_i32(cause);

    gen_helper_fscr_facility_check(cpu_env, t1, t2, t3);
}

/* Same as above, but the facility enable bit lives in the MSR. */
static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn,
                                   int bit, int sprn, int cause)
{
    TCGv_i32 t1 = tcg_constant_i32(bit);
    TCGv_i32 t2 = tcg_constant_i32(sprn);
    TCGv_i32 t3 = tcg_constant_i32(cause);

    gen_helper_msr_facility_check(cpu_env, t1, t2, t3);
}

/* Read the upper 32 bits of the SPR numbered one below sprn. */
void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn)
{
    TCGv spr_up = tcg_temp_new();
    TCGv spr = tcg_temp_new();

    gen_load_spr(spr, sprn - 1);
    tcg_gen_shri_tl(spr_up, spr, 32);
    tcg_gen_ext32u_tl(cpu_gpr[gprn], spr_up);
}

/* Write the upper 32 bits of the SPR numbered one below sprn. */
void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn)
{
    TCGv spr = tcg_temp_new();

    gen_load_spr(spr, sprn - 1);
    tcg_gen_deposit_tl(spr, spr, cpu_gpr[gprn], 32, 32);
    gen_store_spr(sprn - 1, spr);
}
1166
1167 #if !defined(CONFIG_USER_ONLY)
/*
 * Write HMER: the new value is ANDed with the current contents, so
 * software can only clear bits, never set them.
 */
void spr_write_hmer(DisasContext *ctx, int sprn, int gprn)
{
    TCGv hmer = tcg_temp_new();

    gen_load_spr(hmer, sprn);
    tcg_gen_and_tl(hmer, cpu_gpr[gprn], hmer);
    gen_store_spr(sprn, hmer);
    spr_store_dump_spr(sprn);
}

/* Write LPCR via helper (side effects handled outside translation). */
void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]);
}
1182 #endif /* !defined(CONFIG_USER_ONLY) */
1183
/* TAR access: gated by the FSCR TAR facility-enable check. */
void spr_read_tar(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
    spr_read_generic(ctx, gprn, sprn);
}

void spr_write_tar(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
    spr_write_generic(ctx, sprn, gprn);
}

/* TM SPR access: gated by the MSR TM facility-enable check. */
void spr_read_tm(DisasContext *ctx, int gprn, int sprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_read_generic(ctx, gprn, sprn);
}

void spr_write_tm(DisasContext *ctx, int sprn, int gprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_write_generic(ctx, sprn, gprn);
}

/* Upper-32-bit views of the TM SPR pair (see spr_*_prev_upper32). */
void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_read_prev_upper32(ctx, gprn, sprn);
}

void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_write_prev_upper32(ctx, sprn, gprn);
}

/* EBB SPR access: gated by the FSCR EBB facility-enable check. */
void spr_read_ebb(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_read_generic(ctx, gprn, sprn);
}

void spr_write_ebb(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_write_generic(ctx, sprn, gprn);
}

/* Upper-32-bit views of the EBB SPR pair (see spr_*_prev_upper32). */
void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_read_prev_upper32(ctx, gprn, sprn);
}

void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_write_prev_upper32(ctx, sprn, gprn);
}

void spr_read_dexcr_ureg(DisasContext *ctx, int gprn, int sprn)
{
    TCGv t0 = tcg_temp_new();

    /*
     * Access to the (H)DEXCR in problem state is done using separated
     * SPR indexes which are 16 below the SPR indexes which have full
     * access to the (H)DEXCR in privileged state. Problem state can
     * only read bits 32:63, bits 0:31 return 0.
     *
     * See section 9.3.1-9.3.2 of PowerISA v3.1B
     */

    gen_load_spr(t0, sprn + 16);
    tcg_gen_ext32u_tl(cpu_gpr[gprn], t0);
}
1260 #endif
1261
/*
 * Convenience wrappers around the GEN_OPCODE* table-entry macros below.
 * The _E variants carry a second type mask; the *2 variants take a
 * separate opcode name; the _2 variants add a fourth opcode field.
 */
#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                      \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2)             \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type)               \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2)      \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2)     \
GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2)

#define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \
GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2)

/* One entry of the opcode dispatch table: opcode bytes + handler + name. */
typedef struct opcode_t {
    unsigned char opc1, opc2, opc3, opc4;
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[4];
#endif
    opc_handler_t handler;
    const char *oname;
} opcode_t;
1288
/* Raise a privileged-opcode program interrupt. */
static void gen_priv_opc(DisasContext *ctx)
{
    gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
}

/* Helpers for priv. check */
#define GEN_PRIV(CTX) \
    do { \
        gen_priv_opc(CTX); return; \
    } while (0)

#if defined(CONFIG_USER_ONLY)
/* User-mode emulation: every privileged access faults. */
#define CHK_HV(CTX) GEN_PRIV(CTX)
#define CHK_SV(CTX) GEN_PRIV(CTX)
#define CHK_HVRM(CTX) GEN_PRIV(CTX)
#else
/* Require hypervisor state (not problem state, HV set). */
#define CHK_HV(CTX) \
    do { \
        if (unlikely(ctx->pr || !ctx->hv)) {\
            GEN_PRIV(CTX); \
        } \
    } while (0)
/* Require supervisor state (not problem state). */
#define CHK_SV(CTX) \
    do { \
        if (unlikely(ctx->pr)) { \
            GEN_PRIV(CTX); \
        } \
    } while (0)
/* Require hypervisor real mode (HV set, data relocation off). */
#define CHK_HVRM(CTX) \
    do { \
        if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \
            GEN_PRIV(CTX); \
        } \
    } while (0)
#endif

/* No privilege check needed. */
#define CHK_NONE(CTX)
1326
1327 /*****************************************************************************/
1328 /* PowerPC instructions table */
1329
/* Build one opcode_t table entry for gen_<name>; opc4 unused (0xff). */
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* As GEN_OPCODE, but with two invalid-bits masks (dual-form opcodes). */
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* As GEN_OPCODE, but with an explicit display name instead of #name. */
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
/* As GEN_OPCODE, but with a fourth opcode field. */
#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2)              \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* Fourth opcode field AND explicit display name. */
#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2)        \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
1401
1402 /* Invalid instruction */
/* Invalid instruction: raise an invalid-opcode program interrupt. */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}

/* Catch-all handler used for table slots with no real instruction. */
static opc_handler_t invalid_handler = {
    .inval1  = 0xFFFFFFFF,
    .inval2  = 0xFFFFFFFF,
    .type    = PPC_NONE,
    .type2   = PPC_NONE,
    .handler = gen_invalid,
};
1415
1416 /*** Integer comparison ***/
1417
/*
 * Compare arg0 with arg1 (signed if s != 0) and set CR field crf to
 * LT/GT/EQ accordingly, folding in the summary-overflow bit from cpu_so.
 */
static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv_i32 t = tcg_temp_new_i32();

    /* Start from EQ, then override with LT or GT if the compare hits. */
    tcg_gen_movi_tl(t0, CRF_EQ);
    tcg_gen_movi_tl(t1, CRF_LT);
    tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU),
                       t0, arg0, arg1, t1, t0);
    tcg_gen_movi_tl(t1, CRF_GT);
    tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU),
                       t0, arg0, arg1, t1, t0);

    tcg_gen_trunc_tl_i32(t, t0);
    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t);
}

/* Compare against an immediate. */
static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_constant_tl(arg1);
    gen_op_cmp(arg0, t0, s, crf);
}

/* 32-bit compare: extend both operands (sign or zero) before comparing. */
static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    if (s) {
        tcg_gen_ext32s_tl(t0, arg0);
        tcg_gen_ext32s_tl(t1, arg1);
    } else {
        tcg_gen_ext32u_tl(t0, arg0);
        tcg_gen_ext32u_tl(t1, arg1);
    }
    gen_op_cmp(t0, t1, s, crf);
}

/* 32-bit compare against an immediate. */
static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_constant_tl(arg1);
    gen_op_cmp32(arg0, t0, s, crf);
}

/* Set CR0 from a signed compare of reg with zero (Rc=1 record form). */
static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
{
    if (NARROW_MODE(ctx)) {
        gen_op_cmpi32(reg, 0, 1, 0);
    } else {
        gen_op_cmpi(reg, 0, 1, 0);
    }
}
1472
/* cmprb - range comparison: isupper, isalpha, islower */
static void gen_cmprb(DisasContext *ctx)
{
    TCGv_i32 src1 = tcg_temp_new_i32();
    TCGv_i32 src2 = tcg_temp_new_i32();
    TCGv_i32 src2lo = tcg_temp_new_i32();
    TCGv_i32 src2hi = tcg_temp_new_i32();
    TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(src1, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(src2, cpu_gpr[rB(ctx->opcode)]);

    /* src1 byte vs the first lo/hi range bytes taken from src2 */
    tcg_gen_andi_i32(src1, src1, 0xFF);
    tcg_gen_ext8u_i32(src2lo, src2);
    tcg_gen_shri_i32(src2, src2, 8);
    tcg_gen_ext8u_i32(src2hi, src2);

    tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
    tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
    tcg_gen_and_i32(crf, src2lo, src2hi);

    /* L bit set: also test the second range held in the upper bytes */
    if (ctx->opcode & 0x00200000) {
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2lo, src2);
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2hi, src2);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
        tcg_gen_and_i32(src2lo, src2lo, src2hi);
        tcg_gen_or_i32(crf, crf, src2lo);
    }
    /* Result is reported in the GT bit of the CR field. */
    tcg_gen_shli_i32(crf, crf, CRF_GT_BIT);
}
1506
#if defined(TARGET_PPC64)
/* cmpeqb - byte-equality compare, fully delegated to the helper */
static void gen_cmpeqb(DisasContext *ctx)
{
    gen_helper_cmpeqb(cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
}
#endif
1515
/* isel (PowerPC 2.03 specification) */
static void gen_isel(DisasContext *ctx)
{
    uint32_t bi = rC(ctx->opcode);
    uint32_t mask = 0x08 >> (bi & 0x03);   /* mask selecting CR bit BI */
    TCGv t0 = tcg_temp_new();
    TCGv zr;

    /* Extract the tested CR bit into t0. */
    tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
    tcg_gen_andi_tl(t0, t0, mask);

    /* rD = CR[bi] ? rA : rB, where rA == r0 means the value 0. */
    zr = tcg_constant_tl(0);
    tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
                       rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
                       cpu_gpr[rB(ctx->opcode)]);
}

/* cmpb: PowerPC 2.05 specification, delegated to the helper */
static void gen_cmpb(DisasContext *ctx)
{
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}
1539
/*** Integer arithmetic ***/

/*
 * Compute XER OV (and OV32/SO on ISA 3.00) for an add or subtract:
 * overflow occurred iff the result sign differs from both operand signs
 * (classic sign-xor trick; the xor polarity flips for subtraction).
 */
static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_xor_tl(cpu_ov, arg0, arg2);
    tcg_gen_xor_tl(t0, arg1, arg2);
    if (sub) {
        tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
    } else {
        tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
    }
    if (NARROW_MODE(ctx)) {
        /* 32-bit mode: the overflow indicator is bit 31. */
        tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, cpu_ov);
        }
    } else {
        if (is_isa300(ctx)) {
            tcg_gen_extract_tl(cpu_ov32, cpu_ov, 31, 1);
        }
        tcg_gen_extract_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1, 1);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}

/*
 * Compute the CA32 carry-out-of-bit-32 flag for an add/sub result
 * (ISA 3.00 only; a no-op otherwise). Carry into bit 32 is recovered
 * from result ^ arg0 ^ arg1 (eqv for subtraction).
 */
static inline void gen_op_arith_compute_ca32(DisasContext *ctx,
                                             TCGv res, TCGv arg0, TCGv arg1,
                                             TCGv ca32, int sub)
{
    TCGv t0;

    if (!is_isa300(ctx)) {
        return;
    }

    t0 = tcg_temp_new();
    if (sub) {
        tcg_gen_eqv_tl(t0, arg0, arg1);
    } else {
        tcg_gen_xor_tl(t0, arg0, arg1);
    }
    tcg_gen_xor_tl(t0, t0, res);
    tcg_gen_extract_tl(ca32, t0, 32, 1);
}
1587
/*
 * Common add function: ret = arg1 + arg2 [+ ca], optionally computing
 * CA/CA32, OV/OV32/SO, and CR0. ca/ca32 name the carry registers so the
 * same code serves both the CA-based and the OV-based (addex) forms.
 */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, TCGv ca, TCGv ca32,
                                    bool add_ca, bool compute_ca,
                                    bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    /* Use a scratch so flag computation can still read the inputs. */
    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        if (NARROW_MODE(ctx)) {
            /*
             * Caution: a non-obvious corner case of the spec is that
             * we must produce the *entire* 64-bit addition, but
             * produce the carry into bit 32.
             */
            TCGv t1 = tcg_temp_new();
            tcg_gen_xor_tl(t1, arg1, arg2);         /* add without carry */
            tcg_gen_add_tl(t0, arg1, arg2);
            if (add_ca) {
                tcg_gen_add_tl(t0, t0, ca);
            }
            tcg_gen_xor_tl(ca, t0, t1);        /* bits changed w/ carry */
            tcg_gen_extract_tl(ca, ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(ca32, ca);
            }
        } else {
            /* Full-width carry via double-word add with carry-out. */
            TCGv zero = tcg_constant_tl(0);
            if (add_ca) {
                tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero);
                tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero);
            } else {
                tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero);
            }
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0);
        }
    } else {
        tcg_gen_add_tl(t0, arg1, arg2);
        if (add_ca) {
            tcg_gen_add_tl(t0, t0, ca);
        }
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
    }
}
/* Add functions with two operands */
#define GEN_INT_ARITH_ADD(name, opc3, ca, add_ca, compute_ca, compute_ov)     \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     ca, glue(ca, 32),                                        \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}
/* Add functions with one operand and one immediate */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, ca,                    \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_constant_tl(const_val);                                     \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], t0,                            \
                     ca, glue(ca, 32),                                        \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}

/* add add. addo addo. */
GEN_INT_ARITH_ADD(add, 0x08, cpu_ca, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, cpu_ca, 0, 0, 1)
/* addc addc. addco addco. */
GEN_INT_ARITH_ADD(addc, 0x00, cpu_ca, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, cpu_ca, 0, 1, 1)
/* adde adde. addeo addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, cpu_ca, 1, 1, 1)
/* addme addme. addmeo addmeo. */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, cpu_ca, 1, 1, 1)
/* addex: note it uses OV, not CA, as its carry register */
GEN_INT_ARITH_ADD(addex, 0x05, cpu_ov, 1, 1, 0);
/* addze addze. addzeo addzeo.*/
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1)
/* addic addic.*/
static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
{
    TCGv c = tcg_constant_tl(SIMM(ctx->opcode));
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                     c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0);
}

static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, 0);
}

static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, 1);
}
1701
/*
 * 32-bit divide: ret = arg1 / arg2. Invalid cases (divide by zero, and
 * INT_MIN / -1 when signed) are detected first and the divisor forced
 * to a safe value so the host division cannot trap; t2 then doubles as
 * the overflow indicator.
 */
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 t3 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        /* t2 = (t0 == INT_MIN && t1 == -1) || t1 == 0 */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        /* invalid case: replace divisor with 1 (t2) to avoid a trap */
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    } else {
        /* t2 = (t1 == 0) */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    }
    if (compute_ov) {
        /* OV is set exactly when the division was invalid. */
        tcg_gen_extu_i32_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_extu_i32_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}
/* Div functions */
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divwu divwu. divwuo divwuo. */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw divw. divwo divwo. */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);

/* div[wd]eu[o][.] - extended divides, implemented entirely in helpers */
#define GEN_DIVE(name, hlpr, compute_ov)                                      \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 t0 = tcg_constant_i32(compute_ov);                               \
    gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env,                      \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);                           \
    }                                                                         \
}

GEN_DIVE(divweu, divweu, 0);
GEN_DIVE(divweuo, divweu, 1);
GEN_DIVE(divwe, divwe, 0);
GEN_DIVE(divweo, divwe, 1);
1772
#if defined(TARGET_PPC64)
/*
 * 64-bit divide; same structure as gen_op_arith_divw: detect the
 * invalid cases (divide by zero, INT64_MIN / -1 when signed), force a
 * safe divisor, and use the invalid flag t2 as the overflow value.
 */
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        /* t2 = (t0 == INT64_MIN && t1 == -1) || t1 == 0 */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i64(ret, t0, t1);
    } else {
        /* t2 = (t1 == 0) */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i64(ret, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_mov_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}

#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divdu divdu. divduo divduo. */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd divd. divdo divdo. */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
#endif
1831
/*
 * 32-bit modulo: ret = arg1 % arg2. As with the divides, the invalid
 * cases are neutralised by substituting a safe divisor before the host
 * rem op so it cannot trap; no overflow flags are produced.
 */
static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        /* invalid: (t0 == INT_MIN && t1 == -1) || t1 == 0 */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i32(t3, t0, t1);
        tcg_gen_ext_i32_tl(ret, t3);
    } else {
        /* replace a zero divisor with 1 to avoid a trap */
        TCGv_i32 t2 = tcg_constant_i32(1);
        TCGv_i32 t3 = tcg_constant_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i32(t0, t0, t1);
        tcg_gen_extu_i32_tl(ret, t0);
    }
}

#define GEN_INT_ARITH_MODW(name, opc3, sign)                                  \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign);                                                  \
}

GEN_INT_ARITH_MODW(moduw, 0x08, 0);
GEN_INT_ARITH_MODW(modsw, 0x18, 1);

#if defined(TARGET_PPC64)
/* 64-bit modulo; same invalid-case handling as gen_op_arith_modw. */
static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        TCGv_i64 t2 = tcg_temp_new_i64();
        TCGv_i64 t3 = tcg_temp_new_i64();
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i64(ret, t0, t1);
    } else {
        TCGv_i64 t2 = tcg_constant_i64(1);
        TCGv_i64 t3 = tcg_constant_i64(0);
        tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i64(ret, t0, t1);
    }
}

#define GEN_INT_ARITH_MODD(name, opc3, sign)                                  \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign);                                                  \
}

GEN_INT_ARITH_MODD(modud, 0x08, 0);
GEN_INT_ARITH_MODD(modsd, 0x18, 1);
#endif
1911
1912 /* mulhw mulhw. */
1913 static void gen_mulhw(DisasContext *ctx)
1914 {
1915 TCGv_i32 t0 = tcg_temp_new_i32();
1916 TCGv_i32 t1 = tcg_temp_new_i32();
1917
1918 tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
1919 tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
1920 tcg_gen_muls2_i32(t0, t1, t0, t1);
1921 tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
1922 if (unlikely(Rc(ctx->opcode) != 0)) {
1923 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1924 }
1925 }
1926
1927 /* mulhwu mulhwu. */
1928 static void gen_mulhwu(DisasContext *ctx)
1929 {
1930 TCGv_i32 t0 = tcg_temp_new_i32();
1931 TCGv_i32 t1 = tcg_temp_new_i32();
1932
1933 tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
1934 tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
1935 tcg_gen_mulu2_i32(t0, t1, t0, t1);
1936 tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
1937 if (unlikely(Rc(ctx->opcode) != 0)) {
1938 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1939 }
1940 }
1941
/* mullw mullw. - low-order 32-bit multiply */
static void gen_mullw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    /* 64-bit target: sign-extend operands, keep the full 64-bit product. */
    TCGv_i64 t0, t1;
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
#endif
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mullwo mullwo. - 32-bit multiply with overflow detection */
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
#if defined(TARGET_PPC64)
    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
#endif

    /* OV iff the high half is not the sign extension of the low half. */
    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulli - multiply by sign-extended immediate */
static void gen_mulli(DisasContext *ctx)
{
    tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    SIMM(ctx->opcode));
}
1995
#if defined(TARGET_PPC64)
/* mulhd mulhd. - rD = high 64 bits of the signed 64x64 product */
static void gen_mulhd(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();
    tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulhdu mulhdu. - rD = high 64 bits of the unsigned 64x64 product */
static void gen_mulhdu(DisasContext *ctx)
{
    TCGv lo = tcg_temp_new();
    tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulld mulld. - low-order 64-bit multiply */
static void gen_mulld(DisasContext *ctx)
{
    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                   cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}

/* mulldo mulldo. - 64-bit multiply with overflow detection */
static void gen_mulldo(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);

    /* OV iff the high half is not the sign extension of the low half. */
    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
#endif
2051
/*
 * Common subf function: ret = arg2 - arg1 (i.e. ~arg1 + arg2 [+ ca]),
 * optionally computing CA/CA32, OV/OV32/SO, and CR0.
 */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, bool add_ca, bool compute_ca,
                                     bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    /* Use a scratch so flag computation can still read the inputs. */
    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        /* dest = ~arg1 + arg2 [+ ca].  */
        if (NARROW_MODE(ctx)) {
            /*
             * Caution: a non-obvious corner case of the spec is that
             * we must produce the *entire* 64-bit addition, but
             * produce the carry into bit 32.
             */
            TCGv inv1 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            if (add_ca) {
                tcg_gen_add_tl(t0, arg2, cpu_ca);
            } else {
                tcg_gen_addi_tl(t0, arg2, 1);
            }
            tcg_gen_xor_tl(t1, arg2, inv1);         /* add without carry */
            tcg_gen_add_tl(t0, t0, inv1);
            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changes w/ carry */
            tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(cpu_ca32, cpu_ca);
            }
        } else if (add_ca) {
            /* Full-width carry via double-word add with carry-out. */
            TCGv zero, inv1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            zero = tcg_constant_tl(0);
            tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
            gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0);
        } else {
            /* No carry-in: CA = (arg2 >= arg1) on a plain subtract. */
            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
            tcg_gen_sub_tl(t0, arg2, arg1);
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1);
        }
    } else if (add_ca) {
        /*
         * Since we're ignoring carry-out, we can simplify the
         * standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1.
         */
        tcg_gen_sub_tl(t0, arg2, arg1);
        tcg_gen_add_tl(t0, t0, cpu_ca);
        tcg_gen_subi_tl(t0, t0, 1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
    }
}
/* Sub functions with Two operands functions */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* Sub functions with one operand and one immediate */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                 add_ca, compute_ca, compute_ov)              \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_constant_tl(const_val);                                     \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], t0,                           \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* subf subf. subfo subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc subfc. subfco subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe subfe. subfeo subfo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme subfme. subfmeo subfmeo. */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze subfze. subfzeo subfzeo.*/
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)

/* subfic - subtract from immediate (always computes CA, never CR0/OV) */
static void gen_subfic(DisasContext *ctx)
{
    TCGv c = tcg_constant_tl(SIMM(ctx->opcode));
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      c, 0, 1, 0, 0);
}
2162
2163 /* neg neg. nego nego. */
2164 static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
2165 {
2166 TCGv zero = tcg_constant_tl(0);
2167 gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2168 zero, 0, 0, compute_ov, Rc(ctx->opcode));
2169 }
2170
2171 static void gen_neg(DisasContext *ctx)
2172 {
2173 tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
2174 if (unlikely(Rc(ctx->opcode))) {
2175 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2176 }
2177 }
2178
static void gen_nego(DisasContext *ctx)
{
    /* nego: negate with overflow tracking, via the subf-from-zero path */
    gen_op_arith_neg(ctx, 1);
}
2183
/*** Integer logical ***/
/*
 * Two-source logical op translator: rA = op(rS, rB), with optional
 * CR0 update when the Rc bit is set.
 */
#define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],               \
           cpu_gpr[rB(ctx->opcode)]);                                         \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}

/* One-source logical op translator: rA = op(rS), optional CR0 update */
#define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
    if (unlikely(Rc(ctx->opcode) != 0))                                       \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
}
2201
/* and & and. */
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
/* andc & andc. */
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);
2206
2207 /* andi. */
2208 static void gen_andi_(DisasContext *ctx)
2209 {
2210 tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2211 UIMM(ctx->opcode));
2212 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2213 }
2214
2215 /* andis. */
2216 static void gen_andis_(DisasContext *ctx)
2217 {
2218 tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2219 UIMM(ctx->opcode) << 16);
2220 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2221 }
2222
2223 /* cntlzw */
2224 static void gen_cntlzw(DisasContext *ctx)
2225 {
2226 TCGv_i32 t = tcg_temp_new_i32();
2227
2228 tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
2229 tcg_gen_clzi_i32(t, t, 32);
2230 tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
2231
2232 if (unlikely(Rc(ctx->opcode) != 0)) {
2233 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2234 }
2235 }
2236
2237 /* cnttzw */
2238 static void gen_cnttzw(DisasContext *ctx)
2239 {
2240 TCGv_i32 t = tcg_temp_new_i32();
2241
2242 tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
2243 tcg_gen_ctzi_i32(t, t, 32);
2244 tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
2245
2246 if (unlikely(Rc(ctx->opcode) != 0)) {
2247 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2248 }
2249 }
2250
/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
2261
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/*
 * Halt the vCPU and end the TB so other vCPUs can run; used by the
 * "or rx,rx,rx" SMT priority hints in gen_or() below.
 */
static void gen_pause(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_constant_i32(0);
    /* cpu_env points at env inside PowerPCCPU: step back to CPUState.halted */
    tcg_gen_st_i32(t0, cpu_env,
                   -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));

    /* Stop translation, this gives other CPUs a chance to run */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
2273
/* or & or. */
/*
 * Plain "or" when the registers differ; the degenerate encoding
 * "or rx,rx,rx" (no-op as a logical op) is reused by the ISA as an
 * SMT thread-priority hint, handled in the final branch.
 */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for mr. ri case */
    if (rs != ra || rs != rb) {
        if (rs != rb) {
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        } else {
            /* or rx,ry,ry is "mr": a plain register move */
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        }
        if (unlikely(Rc(ctx->opcode) != 0)) {
            gen_set_Rc0(ctx, cpu_gpr[ra]);
        }
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        /* or. rx,rx,rx still has to update CR0 */
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else if (rs != 0) { /* 0 is nop */
        /* "or rx,rx,rx": priority hint; rs selects the priority level */
        int prio = 0;

        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (!ctx->pr) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (!ctx->pr) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (!ctx->pr) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->hv && !ctx->pr) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            break;
        }
        if (prio) {
            /* Store the priority into PPR[11:13] (bits 50-52 from LSB) */
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
        }
#if !defined(CONFIG_USER_ONLY)
        /*
         * Pause out of TCG otherwise spin loops with smt_low eat too
         * much CPU and the kernel hangs. This applies to all
         * encodings other than no-op, e.g., miso(rs=26), yield(27),
         * mdoio(29), mdoom(30), and all currently undefined.
         */
        gen_pause(ctx);
#endif
#endif
    }
}
/* orc & orc. : rA = rS | ~rB */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
2361
2362 /* xor & xor. */
2363 static void gen_xor(DisasContext *ctx)
2364 {
2365 /* Optimisation for "set to zero" case */
2366 if (rS(ctx->opcode) != rB(ctx->opcode)) {
2367 tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2368 cpu_gpr[rB(ctx->opcode)]);
2369 } else {
2370 tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
2371 }
2372 if (unlikely(Rc(ctx->opcode) != 0)) {
2373 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2374 }
2375 }
2376
2377 /* ori */
2378 static void gen_ori(DisasContext *ctx)
2379 {
2380 target_ulong uimm = UIMM(ctx->opcode);
2381
2382 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2383 return;
2384 }
2385 tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
2386 }
2387
2388 /* oris */
2389 static void gen_oris(DisasContext *ctx)
2390 {
2391 target_ulong uimm = UIMM(ctx->opcode);
2392
2393 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2394 /* NOP */
2395 return;
2396 }
2397 tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2398 uimm << 16);
2399 }
2400
2401 /* xori */
2402 static void gen_xori(DisasContext *ctx)
2403 {
2404 target_ulong uimm = UIMM(ctx->opcode);
2405
2406 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2407 /* NOP */
2408 return;
2409 }
2410 tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
2411 }
2412
2413 /* xoris */
2414 static void gen_xoris(DisasContext *ctx)
2415 {
2416 target_ulong uimm = UIMM(ctx->opcode);
2417
2418 if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2419 /* NOP */
2420 return;
2421 }
2422 tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2423 uimm << 16);
2424 }
2425
/* popcntb : PowerPC 2.03 specification */
static void gen_popcntb(DisasContext *ctx)
{
    /* Per-byte population count; done in a helper */
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
2431
/* popcntw: per-word population count */
static void gen_popcntw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    /* Two independent 32-bit counts within the 64-bit register */
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#else
    /* On 32-bit targets the whole register is one word */
    tcg_gen_ctpop_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#endif
}
2440
#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification */
static void gen_popcntd(DisasContext *ctx)
{
    /* Full 64-bit population count maps directly onto a TCG op */
    tcg_gen_ctpop_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif
2448
/* prtyw: PowerPC 2.05 specification */
/*
 * Per-word parity of the least-significant bit of each byte:
 * fold the byte LSBs together with shift/xor, then keep bit 0 of
 * each 32-bit word.
 */
static void gen_prtyw(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 16);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    /* Keep bit 0 of each word (mask is truncated to 1 on 32-bit targets) */
    tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
}
2461
#if defined(TARGET_PPC64)
/* prtyd: PowerPC 2.05 specification */
/* Doubleword parity of the LSB of each byte, folded down to bit 0 */
static void gen_prtyd(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, 1);
}
#endif
2478
#if defined(TARGET_PPC64)
/* bpermd: bit permute doubleword, done in a helper */
static void gen_bpermd(DisasContext *ctx)
{
    gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
#endif
2487
#if defined(TARGET_PPC64)
/* extsw & extsw. : sign-extend the low word */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
2491
2492 /* cntlzd */
2493 static void gen_cntlzd(DisasContext *ctx)
2494 {
2495 tcg_gen_clzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
2496 if (unlikely(Rc(ctx->opcode) != 0)) {
2497 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2498 }
2499 }
2500
2501 /* cnttzd */
2502 static void gen_cnttzd(DisasContext *ctx)
2503 {
2504 tcg_gen_ctzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
2505 if (unlikely(Rc(ctx->opcode) != 0)) {
2506 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2507 }
2508 }
2509
/* darn: deliver a random number (ISA 3.0) */
static void gen_darn(DisasContext *ctx)
{
    int l = L(ctx->opcode);

    if (l > 2) {
        /* Reserved L values return the error indication (-1) */
        tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
    } else {
        /* RNG reads count as I/O for icount purposes */
        translator_io_start(&ctx->base);
        if (l == 0) {
            gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
        } else {
            /* Return 64-bit random for both CRN and RRN */
            gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
        }
    }
}
#endif
2528
/*** Integer rotate ***/

/* rlwimi & rlwimi. */
/*
 * Rotate-left word immediate then mask insert:
 * rA = (rotl32(rS, sh) & MASK(mb,me)) | (rA & ~MASK(mb,me)).
 * A contiguous mask aligned with the rotation collapses to a deposit.
 */
static void gen_rlwimi(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    if (sh == (31 - me) && mb <= me) {
        /* Mask exactly covers the rotated field: single deposit */
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
        TCGv t1;

#if defined(TARGET_PPC64)
        /* MASK() numbers bits from the 64-bit MSB: shift word fields up */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);

#if defined(TARGET_PPC64)
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        t1 = tcg_temp_new();
        if (mask_in_32b) {
            /* Mask fits in 32 bits: rotate in 32-bit arithmetic */
            TCGv_i32 t0 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_extu_i32_tl(t1, t0);
        } else {
#if defined(TARGET_PPC64)
            /* Wrap-around mask: duplicate the word and rotate as 64-bit */
            tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t1, t1, sh);
#else
            g_assert_not_reached();
#endif
        }

        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2581
/* rlwinm & rlwinm. */
/*
 * Rotate-left word immediate then AND with mask:
 * rA = rotl32(rS, sh) & MASK(mb,me). Shift-and-mask patterns used by
 * compilers for bitfield extract/insert collapse to deposit/extract.
 */
static void gen_rlwinm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int sh = SH(ctx->opcode);
    int mb = MB(ctx->opcode);
    int me = ME(ctx->opcode);
    int len = me - mb + 1;
    int rsh = (32 - sh) & 31;

    if (sh != 0 && len > 0 && me == (31 - sh)) {
        /* slwi-style encoding: zero-extending deposit */
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 31 && rsh + len <= 32) {
        /* srwi/extract-style encoding */
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
#if defined(TARGET_PPC64)
        /* MASK() numbers bits from the 64-bit MSB: shift word fields up */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);
#if defined(TARGET_PPC64)
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        if (mask_in_32b) {
            if (sh == 0) {
                tcg_gen_andi_tl(t_ra, t_rs, mask);
            } else {
                /* Rotate in 32-bit arithmetic, then mask */
                TCGv_i32 t0 = tcg_temp_new_i32();
                tcg_gen_trunc_tl_i32(t0, t_rs);
                tcg_gen_rotli_i32(t0, t0, sh);
                tcg_gen_andi_i32(t0, t0, mask);
                tcg_gen_extu_i32_tl(t_ra, t0);
            }
        } else {
#if defined(TARGET_PPC64)
            /* Wrap-around mask: duplicate the word and rotate as 64-bit */
            tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t_ra, t_ra, sh);
            tcg_gen_andi_i64(t_ra, t_ra, mask);
#else
            g_assert_not_reached();
#endif
        }
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2634
/* rlwnm & rlwnm. */
/*
 * Rotate-left word by register amount (rB & 0x1f) then AND with mask:
 * rA = rotl32(rS, rB & 31) & MASK(mb,me).
 */
static void gen_rlwnm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);
    target_ulong mask;
    bool mask_in_32b = true;

#if defined(TARGET_PPC64)
    /* MASK() numbers bits from the 64-bit MSB: shift word fields up */
    mb += 32;
    me += 32;
#endif
    mask = MASK(mb, me);

#if defined(TARGET_PPC64)
    if (mask > 0xffffffffu) {
        mask_in_32b = false;
    }
#endif
    if (mask_in_32b) {
        /* 32-bit rotate by the low 5 bits of rB */
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, t_rb);
        tcg_gen_trunc_tl_i32(t1, t_rs);
        tcg_gen_andi_i32(t0, t0, 0x1f);
        tcg_gen_rotl_i32(t1, t1, t0);
        tcg_gen_extu_i32_tl(t_ra, t1);
    } else {
#if defined(TARGET_PPC64)
        /* Wrap-around mask: duplicate the word and rotate as 64-bit */
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_andi_i64(t0, t_rb, 0x1f);
        tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
        tcg_gen_rotl_i64(t_ra, t_ra, t0);
#else
        g_assert_not_reached();
#endif
    }

    tcg_gen_andi_tl(t_ra, t_ra, mask);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2682
#if defined(TARGET_PPC64)
/*
 * The 64-bit rotate opcodes split their 6-bit SH/MB/ME fields, so each
 * instruction decodes as 2 or 4 opcode variants. These macros stamp out
 * the per-variant entry points that pass the extra field bit(s) down.
 */
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0);                                                       \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1);                                                       \
}
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 1);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##2)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##3)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 1);                                                    \
}
2714
/*
 * Common body for rldicl/rldicr/rldic:
 * rA = rotl64(rS, sh) & MASK(mb, me), with shift-left / extract
 * special cases lowered to single TCG ops.
 */
static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int len = me - mb + 1;
    int rsh = (64 - sh) & 63;

    if (sh != 0 && len > 0 && me == (63 - sh)) {
        /* sldi-style encoding: zero-extending deposit */
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 63 && rsh + len <= 64) {
        /* srdi/extract-style encoding */
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        tcg_gen_rotli_tl(t_ra, t_rs, sh);
        tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2734
2735 /* rldicl - rldicl. */
2736 static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
2737 {
2738 uint32_t sh, mb;
2739
2740 sh = SH(ctx->opcode) | (shn << 5);
2741 mb = MB(ctx->opcode) | (mbn << 5);
2742 gen_rldinm(ctx, mb, 63, sh);
2743 }
2744 GEN_PPC64_R4(rldicl, 0x1E, 0x00);
2745
2746 /* rldicr - rldicr. */
2747 static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
2748 {
2749 uint32_t sh, me;
2750
2751 sh = SH(ctx->opcode) | (shn << 5);
2752 me = MB(ctx->opcode) | (men << 5);
2753 gen_rldinm(ctx, 0, me, sh);
2754 }
2755 GEN_PPC64_R4(rldicr, 0x1E, 0x02);
2756
2757 /* rldic - rldic. */
2758 static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
2759 {
2760 uint32_t sh, mb;
2761
2762 sh = SH(ctx->opcode) | (shn << 5);
2763 mb = MB(ctx->opcode) | (mbn << 5);
2764 gen_rldinm(ctx, mb, 63 - sh, sh);
2765 }
2766 GEN_PPC64_R4(rldic, 0x1E, 0x04);
2767
2768 static void gen_rldnm(DisasContext *ctx, int mb, int me)
2769 {
2770 TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2771 TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2772 TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
2773 TCGv t0;
2774
2775 t0 = tcg_temp_new();
2776 tcg_gen_andi_tl(t0, t_rb, 0x3f);
2777 tcg_gen_rotl_tl(t_ra, t_rs, t0);
2778
2779 tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
2780 if (unlikely(Rc(ctx->opcode) != 0)) {
2781 gen_set_Rc0(ctx, t_ra);
2782 }
2783 }
2784
2785 /* rldcl - rldcl. */
2786 static inline void gen_rldcl(DisasContext *ctx, int mbn)
2787 {
2788 uint32_t mb;
2789
2790 mb = MB(ctx->opcode) | (mbn << 5);
2791 gen_rldnm(ctx, mb, 63);
2792 }
2793 GEN_PPC64_R2(rldcl, 0x1E, 0x08);
2794
2795 /* rldcr - rldcr. */
2796 static inline void gen_rldcr(DisasContext *ctx, int men)
2797 {
2798 uint32_t me;
2799
2800 me = MB(ctx->opcode) | (men << 5);
2801 gen_rldnm(ctx, 0, me);
2802 }
2803 GEN_PPC64_R2(rldcr, 0x1E, 0x09);
2804
/* rldimi - rldimi. */
/*
 * Rotate-left doubleword immediate then mask insert:
 * rA = (rotl64(rS, sh) & MASK(mb, 63-sh)) | (rA & ~MASK(mb, 63-sh)).
 * A non-wrapping mask collapses to a deposit.
 */
static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode) | (shn << 5);
    uint32_t mb = MB(ctx->opcode) | (mbn << 5);
    uint32_t me = 63 - sh;

    if (mb <= me) {
        /* Mask exactly covers the rotated field: single deposit */
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        /* Wrap-around mask: explicit rotate/and/merge */
        target_ulong mask = MASK(mb, me);
        TCGv t1 = tcg_temp_new();

        tcg_gen_rotli_tl(t1, t_rs, sh);
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
#endif
2831
/*** Integer shift ***/

/* slw & slw. */
/*
 * Shift left word: rA = (rS << (rB & 31)) zero-extended, but 0 when
 * rB bit 5 is set (shift amounts 32..63 produce zero per the ISA).
 */
static void gen_slw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    /* Move rB bit 5 into the sign bit, then replicate it with sari */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    /* Result is a word quantity: clear the high half on 64-bit */
    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2857
/* sraw & sraw. */
/* Shift right algebraic word; helper also computes CA/CA32 */
static void gen_sraw(DisasContext *ctx)
{
    gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2867
/* srawi & srawi. */
/*
 * Shift right algebraic word immediate. CA is set iff the source is
 * negative AND any 1-bits are shifted out (i.e. the result rounded
 * toward minus infinity differs from truncation).
 */
static void gen_srawi(DisasContext *ctx)
{
    int sh = SH(ctx->opcode);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Shift by zero: just sign-extend, carry is always clear */
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        tcg_gen_ext32s_tl(dst, src);
        /* Collect the bits that will be shifted out... */
        tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        /* ...and keep them only when the (sign-extended) source is negative */
        tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, dst, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
2897
/* srw & srw. */
/*
 * Shift right word: rA = (rS & 0xffffffff) >> (rB & 31), but 0 when
 * rB bit 5 is set (shift amounts 32..63 produce zero per the ISA).
 */
static void gen_srw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    /* Move rB bit 5 into the sign bit, then replicate it with sari */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    /* Logical word shift: zero-extend the source before shifting */
    tcg_gen_ext32u_tl(t0, t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2921
2922 #if defined(TARGET_PPC64)
2923 /* sld & sld. */
2924 static void gen_sld(DisasContext *ctx)
2925 {
2926 TCGv t0, t1;
2927
2928 t0 = tcg_temp_new();
2929 /* AND rS with a mask that is 0 when rB >= 0x40 */
2930 tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
2931 tcg_gen_sari_tl(t0, t0, 0x3f);
2932 tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2933 t1 = tcg_temp_new();
2934 tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
2935 tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2936 if (unlikely(Rc(ctx->opcode) != 0)) {
2937 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2938 }
2939 }
2940
/* srad & srad. */
/* Shift right algebraic doubleword; helper also computes CA/CA32 */
static void gen_srad(DisasContext *ctx)
{
    gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
/* sradi & sradi. */
/*
 * Shift right algebraic doubleword immediate; n is the split high bit
 * of the 6-bit shift field. CA is set iff the source is negative AND
 * any 1-bits are shifted out.
 */
static inline void gen_sradi(DisasContext *ctx, int n)
{
    int sh = SH(ctx->opcode) + (n << 5);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Shift by zero: copy, carry is always clear */
        tcg_gen_mov_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        /* Collect the bits that will be shifted out... */
        tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        /* ...and keep them only when the source is negative */
        tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, src, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
2978
/* Variants for the split high bit of the sradi shift amount */
static void gen_sradi0(DisasContext *ctx)
{
    gen_sradi(ctx, 0);
}

static void gen_sradi1(DisasContext *ctx)
{
    gen_sradi(ctx, 1);
}
2988
2989 /* extswsli & extswsli. */
2990 static inline void gen_extswsli(DisasContext *ctx, int n)
2991 {
2992 int sh = SH(ctx->opcode) + (n << 5);
2993 TCGv dst = cpu_gpr[rA(ctx->opcode)];
2994 TCGv src = cpu_gpr[rS(ctx->opcode)];
2995
2996 tcg_gen_ext32s_tl(dst, src);
2997 tcg_gen_shli_tl(dst, dst, sh);
2998 if (unlikely(Rc(ctx->opcode) != 0)) {
2999 gen_set_Rc0(ctx, dst);
3000 }
3001 }
3002
/* Variants for the split high bit of the extswsli shift amount */
static void gen_extswsli0(DisasContext *ctx)
{
    gen_extswsli(ctx, 0);
}

static void gen_extswsli1(DisasContext *ctx)
{
    gen_extswsli(ctx, 1);
}
3012
/* srd & srd. */
/* Shift right doubleword: rA = rS >> (rB & 63); amounts >= 64 yield 0 */
static void gen_srd(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
#endif
3031
/*** Addressing modes ***/
/* Register indirect with immediate index : EA = (rA|0) + SIMM */
/*
 * maskl clears low displacement bits for DS/DQ-form instructions.
 * In narrow (32-bit) mode the EA is truncated to 32 bits.
 */
static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
                                      target_long maskl)
{
    target_long simm = SIMM(ctx->opcode);

    simm &= ~maskl;
    if (rA(ctx->opcode) == 0) {
        /* rA == 0 reads as literal zero: EA is just the displacement */
        if (NARROW_MODE(ctx)) {
            simm = (uint32_t)simm;
        }
        tcg_gen_movi_tl(EA, simm);
    } else if (likely(simm != 0)) {
        tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, EA);
        }
    } else {
        /* Zero displacement: EA is just (possibly truncated) rA */
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        } else {
            tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        }
    }
}
3058
3059 static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
3060 {
3061 if (rA(ctx->opcode) == 0) {
3062 if (NARROW_MODE(ctx)) {
3063 tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
3064 } else {
3065 tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
3066 }
3067 } else {
3068 tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
3069 if (NARROW_MODE(ctx)) {
3070 tcg_gen_ext32u_tl(EA, EA);
3071 }
3072 }
3073 }
3074
3075 static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
3076 {
3077 if (rA(ctx->opcode) == 0) {
3078 tcg_gen_movi_tl(EA, 0);
3079 } else if (NARROW_MODE(ctx)) {
3080 tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3081 } else {
3082 tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3083 }
3084 }
3085
/* ret = arg1 + val, wrapped to 32 bits when running in narrow mode */
static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                target_long val)
{
    tcg_gen_addi_tl(ret, arg1, val);
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(ret, ret);
    }
}
3094
/* Raise an alignment interrupt for insns unsupported in LE mode */
static inline void gen_align_no_le(DisasContext *ctx)
{
    /* The error code carries the offending opcode's major fields */
    gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
                      (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
}
3100
3101 static TCGv do_ea_calc(DisasContext *ctx, int ra, TCGv displ)
3102 {
3103 TCGv ea = tcg_temp_new();
3104 if (ra) {
3105 tcg_gen_add_tl(ea, cpu_gpr[ra], displ);
3106 } else {
3107 tcg_gen_mov_tl(ea, displ);
3108 }
3109 if (NARROW_MODE(ctx)) {
3110 tcg_gen_ext32u_tl(ea, ea);
3111 }
3112 return ea;
3113 }
3114
/*** Integer load ***/
/* Apply the guest's default byte order; BSWAP_MEMOP forces the reverse */
#define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
#define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))

/* Emit gen_qemu_<ldop>(): target_long-sized guest load with memop 'op' */
#define GEN_QEMU_LOAD_TL(ldop, op)                                      \
static void glue(gen_qemu_, ldop)(DisasContext *ctx,                    \
                                  TCGv val,                             \
                                  TCGv addr)                            \
{                                                                       \
    tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op);                    \
}

GEN_QEMU_LOAD_TL(ld8u,  DEF_MEMOP(MO_UB))
GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))

/* Byte-reversed variants for lhbrx/lwbrx */
GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))

/* Emit gen_qemu_<ldop>_i64(): 64-bit guest load with memop 'op' */
#define GEN_QEMU_LOAD_64(ldop, op)                                      \
static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx,        \
                                              TCGv_i64 val,             \
                                              TCGv addr)                \
{                                                                       \
    tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op);                   \
}

GEN_QEMU_LOAD_64(ld8u,  DEF_MEMOP(MO_UB))
GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
GEN_QEMU_LOAD_64(ld64,  DEF_MEMOP(MO_UQ))

#if defined(TARGET_PPC64)
/* Byte-reversed doubleword load for ldbrx */
GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_UQ))
#endif
3153
/* Emit gen_qemu_<stop>(): target_long-sized guest store with memop 'op' */
#define GEN_QEMU_STORE_TL(stop, op)                                     \
static void glue(gen_qemu_, stop)(DisasContext *ctx,                    \
                                  TCGv val,                             \
                                  TCGv addr)                            \
{                                                                       \
    tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op);                    \
}

#if defined(TARGET_PPC64) || !defined(CONFIG_USER_ONLY)
GEN_QEMU_STORE_TL(st8,  DEF_MEMOP(MO_UB))
#endif
GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))

/* Byte-reversed variants for sthbrx/stwbrx */
GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))

/* Emit gen_qemu_<stop>_i64(): 64-bit guest store with memop 'op' */
#define GEN_QEMU_STORE_64(stop, op)                                     \
static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx,        \
                                              TCGv_i64 val,             \
                                              TCGv addr)                \
{                                                                       \
    tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op);                   \
}

GEN_QEMU_STORE_64(st8,  DEF_MEMOP(MO_UB))
GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_UQ))

#if defined(TARGET_PPC64)
/* Byte-reversed doubleword store for stdbrx */
GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_UQ))
#endif
3187
/*
 * Emit gen_<name>x(): X-form indexed load into rD. 'chk' injects a
 * privilege check (CHK_NONE / CHK_HVRM / ...) before any codegen.
 */
#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk)                   \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    chk(ctx);                                                                 \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
}

#define GEN_LDX(name, ldop, opc2, opc3, type)                                 \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_NONE)

#define GEN_LDX_HVRM(name, ldop, opc2, opc3, type)                            \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_HVRM)

/* External-PID loads: supervisor-only, use the EPID load MMU index */
#define GEN_LDEPX(name, ldop, opc2, opc3)                                     \
static void glue(gen_, name##epx)(DisasContext *ctx)                          \
{                                                                             \
    TCGv EA;                                                                  \
    CHK_SV(ctx);                                                              \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_LOAD, ldop);\
}

GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
#if defined(TARGET_PPC64)
GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
#endif

#if defined(TARGET_PPC64)
/* CI load/store variants */
GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
#endif
3230
/*** Integer store ***/
/*
 * Emit gen_<name>x(): X-form indexed store from rS. 'chk' injects a
 * privilege check (CHK_NONE / CHK_HVRM / ...) before any codegen.
 */
#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk)                   \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    chk(ctx);                                                                 \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
}
#define GEN_STX(name, stop, opc2, opc3, type)                                 \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE)

#define GEN_STX_HVRM(name, stop, opc2, opc3, type)                            \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_HVRM)

/* External-PID stores: supervisor-only, use the EPID store MMU index */
#define GEN_STEPX(name, stop, opc2, opc3)                                     \
static void glue(gen_, name##epx)(DisasContext *ctx)                          \
{                                                                             \
    TCGv EA;                                                                  \
    CHK_SV(ctx);                                                              \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    /* rD and rS decode the same opcode field, so rD() works here too */      \
    tcg_gen_qemu_st_tl(                                                       \
        cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_STORE, stop);              \
}

GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
#if defined(TARGET_PPC64)
GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1d, 0x04)
#endif

#if defined(TARGET_PPC64)
GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
#endif
/*** Integer load and store with byte reverse ***/
/* These swap the byte order of the accessed data relative to the
 * current endianness (the ..ur/..r memory subroutines). */

/* lhbrx */
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);

/* lwbrx */
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);

#if defined(TARGET_PPC64)
/* ldbrx */
GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
/* stdbrx */
GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
#endif  /* TARGET_PPC64 */

/* sthbrx */
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
/* stwbrx */
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
3292
3293 /*** Integer load and store multiple ***/
3294
3295 /* lmw */
3296 static void gen_lmw(DisasContext *ctx)
3297 {
3298 TCGv t0;
3299 TCGv_i32 t1;
3300
3301 if (ctx->le_mode) {
3302 gen_align_no_le(ctx);
3303 return;
3304 }
3305 gen_set_access_type(ctx, ACCESS_INT);
3306 t0 = tcg_temp_new();
3307 t1 = tcg_constant_i32(rD(ctx->opcode));
3308 gen_addr_imm_index(ctx, t0, 0);
3309 gen_helper_lmw(cpu_env, t0, t1);
3310 }
3311
3312 /* stmw */
3313 static void gen_stmw(DisasContext *ctx)
3314 {
3315 TCGv t0;
3316 TCGv_i32 t1;
3317
3318 if (ctx->le_mode) {
3319 gen_align_no_le(ctx);
3320 return;
3321 }
3322 gen_set_access_type(ctx, ACCESS_INT);
3323 t0 = tcg_temp_new();
3324 t1 = tcg_constant_i32(rS(ctx->opcode));
3325 gen_addr_imm_index(ctx, t0, 0);
3326 gen_helper_stmw(cpu_env, t0, t1);
3327 }
3328
3329 /*** Integer load and store strings ***/
3330
3331 /* lswi */
3332 /*
3333 * PowerPC32 specification says we must generate an exception if rA is
3334 * in the range of registers to be loaded. In an other hand, IBM says
3335 * this is valid, but rA won't be loaded. For now, I'll follow the
3336 * spec...
3337 */
3338 static void gen_lswi(DisasContext *ctx)
3339 {
3340 TCGv t0;
3341 TCGv_i32 t1, t2;
3342 int nb = NB(ctx->opcode);
3343 int start = rD(ctx->opcode);
3344 int ra = rA(ctx->opcode);
3345 int nr;
3346
3347 if (ctx->le_mode) {
3348 gen_align_no_le(ctx);
3349 return;
3350 }
3351 if (nb == 0) {
3352 nb = 32;
3353 }
3354 nr = DIV_ROUND_UP(nb, 4);
3355 if (unlikely(lsw_reg_in_range(start, nr, ra))) {
3356 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
3357 return;
3358 }
3359 gen_set_access_type(ctx, ACCESS_INT);
3360 t0 = tcg_temp_new();
3361 gen_addr_register(ctx, t0);
3362 t1 = tcg_constant_i32(nb);
3363 t2 = tcg_constant_i32(start);
3364 gen_helper_lsw(cpu_env, t0, t1, t2);
3365 }
3366
3367 /* lswx */
3368 static void gen_lswx(DisasContext *ctx)
3369 {
3370 TCGv t0;
3371 TCGv_i32 t1, t2, t3;
3372
3373 if (ctx->le_mode) {
3374 gen_align_no_le(ctx);
3375 return;
3376 }
3377 gen_set_access_type(ctx, ACCESS_INT);
3378 t0 = tcg_temp_new();
3379 gen_addr_reg_index(ctx, t0);
3380 t1 = tcg_constant_i32(rD(ctx->opcode));
3381 t2 = tcg_constant_i32(rA(ctx->opcode));
3382 t3 = tcg_constant_i32(rB(ctx->opcode));
3383 gen_helper_lswx(cpu_env, t0, t1, t2, t3);
3384 }
3385
3386 /* stswi */
3387 static void gen_stswi(DisasContext *ctx)
3388 {
3389 TCGv t0;
3390 TCGv_i32 t1, t2;
3391 int nb = NB(ctx->opcode);
3392
3393 if (ctx->le_mode) {
3394 gen_align_no_le(ctx);
3395 return;
3396 }
3397 gen_set_access_type(ctx, ACCESS_INT);
3398 t0 = tcg_temp_new();
3399 gen_addr_register(ctx, t0);
3400 if (nb == 0) {
3401 nb = 32;
3402 }
3403 t1 = tcg_constant_i32(nb);
3404 t2 = tcg_constant_i32(rS(ctx->opcode));
3405 gen_helper_stsw(cpu_env, t0, t1, t2);
3406 }
3407
3408 /* stswx */
3409 static void gen_stswx(DisasContext *ctx)
3410 {
3411 TCGv t0;
3412 TCGv_i32 t1, t2;
3413
3414 if (ctx->le_mode) {
3415 gen_align_no_le(ctx);
3416 return;
3417 }
3418 gen_set_access_type(ctx, ACCESS_INT);
3419 t0 = tcg_temp_new();
3420 gen_addr_reg_index(ctx, t0);
3421 t1 = tcg_temp_new_i32();
3422 tcg_gen_trunc_tl_i32(t1, cpu_xer);
3423 tcg_gen_andi_i32(t1, t1, 0x7F);
3424 t2 = tcg_constant_i32(rS(ctx->opcode));
3425 gen_helper_stsw(cpu_env, t0, t1, t2);
3426 }
3427
/*** Memory synchronisation ***/
/* eieio */
static void gen_eieio(DisasContext *ctx)
{
    TCGBar bar = TCG_MO_ALL;

    /*
     * eieio has complex semantics. It provides memory ordering between
     * operations in the set:
     * - loads from CI memory.
     * - stores to CI memory.
     * - stores to WT memory.
     *
     * It separately also orders memory for operations in the set:
     * - stores to cacheable memory.
     *
     * It also serializes instructions:
     * - dcbt and dcbst.
     *
     * It separately serializes:
     * - tlbie and tlbsync.
     *
     * And separately serializes:
     * - slbieg, slbiag, and slbsync.
     *
     * The end result is that CI memory ordering requires TCG_MO_ALL
     * and it is not possible to special-case more relaxed ordering for
     * cacheable accesses. TCG_BAR_SC is required to provide this
     * serialization.
     */

    /*
     * POWER9 has a eieio instruction variant using bit 6 as a hint to
     * tell the CPU it is a store-forwarding barrier.
     */
    if (ctx->opcode & 0x2000000) {
        /*
         * ISA says that "Reserved fields in instructions are ignored
         * by the processor". So ignore the bit 6 on non-POWER9 CPU but
         * as this is not an instruction software should be using,
         * complain to the user.
         */
        if (!(ctx->insns_flags2 & PPC2_ISA300)) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid eieio using bit 6 at @"
                          TARGET_FMT_lx "\n", ctx->cia);
        } else {
            /* Store-forwarding barrier: only order stores before loads. */
            bar = TCG_MO_ST_LD;
        }
    }

    tcg_gen_mb(bar | TCG_BAR_SC);
}
3480
3481 #if !defined(CONFIG_USER_ONLY)
3482 static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
3483 {
3484 TCGv_i32 t;
3485 TCGLabel *l;
3486
3487 if (!ctx->lazy_tlb_flush) {
3488 return;
3489 }
3490 l = gen_new_label();
3491 t = tcg_temp_new_i32();
3492 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
3493 tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
3494 if (global) {
3495 gen_helper_check_tlb_flush_global(cpu_env);
3496 } else {
3497 gen_helper_check_tlb_flush_local(cpu_env);
3498 }
3499 gen_set_label(l);
3500 }
3501 #else
3502 static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
3503 #endif
3504
/* isync */
static void gen_isync(DisasContext *ctx)
{
    /*
     * We need to check for a pending TLB flush. This can only happen in
     * kernel mode however so check MSR_PR
     */
    if (!ctx->pr) {
        gen_check_tlb_flush(ctx, false);
    }
    /* Full barrier, then force a TB exit so the new context is seen. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}
3518
/* Access size in bytes of a MemOp (1 << its MO_SIZE field). */
#define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE))
3520
3521 static void gen_load_locked(DisasContext *ctx, MemOp memop)
3522 {
3523 TCGv gpr = cpu_gpr[rD(ctx->opcode)];
3524 TCGv t0 = tcg_temp_new();
3525
3526 gen_set_access_type(ctx, ACCESS_RES);
3527 gen_addr_reg_index(ctx, t0);
3528 tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN);
3529 tcg_gen_mov_tl(cpu_reserve, t0);
3530 tcg_gen_movi_tl(cpu_reserve_length, memop_size(memop));
3531 tcg_gen_mov_tl(cpu_reserve_val, gpr);
3532 }
3533
/* Emit a load-and-reserve instruction handler (see gen_load_locked). */
#define LARX(name, memop)                  \
static void gen_##name(DisasContext *ctx)  \
{                                          \
    gen_load_locked(ctx, memop);           \
}

/* lbarx, lharx, lwarx */
LARX(lbarx, DEF_MEMOP(MO_UB))
LARX(lharx, DEF_MEMOP(MO_UW))
LARX(lwarx, DEF_MEMOP(MO_UL))
3544
/*
 * Non-parallel body of the "fetch and increment/decrement
 * bounded/equal" atomics: load the word at EA and the word following
 * it; if "cond" holds between them, store the adjusted value back and
 * return the original, otherwise return 1 << (size*8-1) as the
 * "operation did not occur" marker.
 */
static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop,
                                      TCGv EA, TCGCond cond, int addend)
{
    TCGv t = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv u = tcg_temp_new();

    tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
    tcg_gen_addi_tl(t2, EA, MEMOP_GET_SIZE(memop));
    tcg_gen_qemu_ld_tl(t2, t2, ctx->mem_idx, memop);
    tcg_gen_addi_tl(u, t, addend);

    /* E.g. for fetch and increment bounded... */
    /* mem(EA,s) = (t != t2 ? u = t + 1 : t) */
    tcg_gen_movcond_tl(cond, u, t, t2, u, t);
    tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop);

    /* RT = (t != t2 ? t : u = 1<<(s*8-1)) */
    tcg_gen_movi_tl(u, 1 << (MEMOP_GET_SIZE(memop) * 8 - 1));
    tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u);
}
3566
/*
 * Common body of lwat/ldat: dispatch on the FC function code to the
 * matching atomic load-and-modify operation.  dst is rD, src is the
 * next register (mod 32); some function codes also use rD+2.  Forms
 * that have no TCG atomic primitive fall back to a non-atomic
 * sequence, or restart under an exclusive lock when translating for
 * parallel execution.
 */
static void gen_ld_atomic(DisasContext *ctx, MemOp memop)
{
    uint32_t gpr_FC = FC(ctx->opcode);
    TCGv EA = tcg_temp_new();
    int rt = rD(ctx->opcode);
    bool need_serial;
    TCGv src, dst;

    gen_addr_register(ctx, EA);
    dst = cpu_gpr[rt];
    src = cpu_gpr[(rt + 1) & 31];

    need_serial = false;
    memop |= MO_ALIGN;
    switch (gpr_FC) {
    case 0: /* Fetch and add */
        tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 1: /* Fetch and xor */
        tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 2: /* Fetch and or */
        tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 3: /* Fetch and 'and' */
        tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 4: /* Fetch and max unsigned */
        tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 5: /* Fetch and max signed */
        tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 6: /* Fetch and min unsigned */
        tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 7: /* Fetch and min signed */
        tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 8: /* Swap */
        tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop);
        break;

    case 16: /* Compare and swap not equal */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();

            tcg_gen_qemu_ld_tl(t0, EA, ctx->mem_idx, memop);
            /* For the 32-bit form, compare only the low 32 bits of src. */
            if ((memop & MO_SIZE) == MO_64 || TARGET_LONG_BITS == 32) {
                tcg_gen_mov_tl(t1, src);
            } else {
                tcg_gen_ext32u_tl(t1, src);
            }
            tcg_gen_movcond_tl(TCG_COND_NE, t1, t0, t1,
                               cpu_gpr[(rt + 2) & 31], t0);
            tcg_gen_qemu_st_tl(t1, EA, ctx->mem_idx, memop);
            tcg_gen_mov_tl(dst, t0);
        }
        break;

    case 24: /* Fetch and increment bounded */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, 1);
        }
        break;
    case 25: /* Fetch and increment equal */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_EQ, 1);
        }
        break;
    case 28: /* Fetch and decrement bounded */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, -1);
        }
        break;

    default:
        /* invoke data storage error handler */
        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
    }

    if (need_serial) {
        /* Restart with exclusive lock. */
        gen_helper_exit_atomic(cpu_env);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}
3663
/* lwat: 32-bit atomic load-and-modify (see gen_ld_atomic). */
static void gen_lwat(DisasContext *ctx)
{
    gen_ld_atomic(ctx, DEF_MEMOP(MO_UL));
}

#ifdef TARGET_PPC64
/* ldat: 64-bit atomic load-and-modify (see gen_ld_atomic). */
static void gen_ldat(DisasContext *ctx)
{
    gen_ld_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
3675
/*
 * Common body of stwat/stdat: dispatch on the FC function code to the
 * matching atomic store operation; the fetched value is discarded.
 * "Store twin" has no TCG primitive, so it restarts under an exclusive
 * lock when translating for parallel execution.
 */
static void gen_st_atomic(DisasContext *ctx, MemOp memop)
{
    uint32_t gpr_FC = FC(ctx->opcode);
    TCGv EA = tcg_temp_new();
    TCGv src, discard;

    gen_addr_register(ctx, EA);
    src = cpu_gpr[rD(ctx->opcode)];
    discard = tcg_temp_new();

    memop |= MO_ALIGN;
    switch (gpr_FC) {
    case 0: /* add and Store */
        tcg_gen_atomic_add_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 1: /* xor and Store */
        tcg_gen_atomic_xor_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 2: /* Or and Store */
        tcg_gen_atomic_or_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 3: /* 'and' and Store */
        tcg_gen_atomic_and_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 4: /* Store max unsigned */
        tcg_gen_atomic_umax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 5: /* Store max signed */
        tcg_gen_atomic_smax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 6: /* Store min unsigned */
        tcg_gen_atomic_umin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 7: /* Store min signed */
        tcg_gen_atomic_smin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 24: /* Store twin */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            /* Restart with exclusive lock. */
            gen_helper_exit_atomic(cpu_env);
            ctx->base.is_jmp = DISAS_NORETURN;
        } else {
            TCGv t = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv s = tcg_temp_new();
            TCGv s2 = tcg_temp_new();
            TCGv ea_plus_s = tcg_temp_new();

            /* Store src to both words only if they currently match. */
            tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
            tcg_gen_addi_tl(ea_plus_s, EA, MEMOP_GET_SIZE(memop));
            tcg_gen_qemu_ld_tl(t2, ea_plus_s, ctx->mem_idx, memop);
            tcg_gen_movcond_tl(TCG_COND_EQ, s, t, t2, src, t);
            tcg_gen_movcond_tl(TCG_COND_EQ, s2, t, t2, src, t2);
            tcg_gen_qemu_st_tl(s, EA, ctx->mem_idx, memop);
            tcg_gen_qemu_st_tl(s2, ea_plus_s, ctx->mem_idx, memop);
        }
        break;
    default:
        /* invoke data storage error handler */
        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
    }
}
3738
/* stwat: 32-bit atomic store (see gen_st_atomic). */
static void gen_stwat(DisasContext *ctx)
{
    gen_st_atomic(ctx, DEF_MEMOP(MO_UL));
}

#ifdef TARGET_PPC64
/* stdat: 64-bit atomic store (see gen_st_atomic). */
static void gen_stdat(DisasContext *ctx)
{
    gen_st_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
3750
/*
 * Common body of the store-conditional instructions (st[bhwd]cx.):
 * succeed only when the reservation address and length match, and the
 * memory still holds the reserved value (checked with a cmpxchg).
 * CR0 is set to [0,0,EQ,SO] with EQ indicating success; the
 * reservation is always cleared.
 */
static void gen_conditional_store(DisasContext *ctx, MemOp memop)
{
    TCGLabel *lfail;
    TCGv EA;
    TCGv cr0;
    TCGv t0;
    int rs = rS(ctx->opcode);

    lfail = gen_new_label();
    EA = tcg_temp_new();
    cr0 = tcg_temp_new();
    t0 = tcg_temp_new();

    tcg_gen_mov_tl(cr0, cpu_so);
    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, EA);
    tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, memop_size(memop), lfail);

    tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val,
                              cpu_gpr[rs], ctx->mem_idx,
                              DEF_MEMOP(memop) | MO_ALIGN);
    /* Set CR0[EQ] when the cmpxchg saw the reserved value. */
    tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_reserve_val);
    tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
    tcg_gen_or_tl(cr0, cr0, t0);

    gen_set_label(lfail);
    tcg_gen_trunc_tl_i32(cpu_crf[0], cr0);
    /* Kill the reservation whether the store succeeded or not. */
    tcg_gen_movi_tl(cpu_reserve, -1);
}
3781
/* Emit a store-conditional instruction handler (see gen_conditional_store). */
#define STCX(name, memop)                  \
static void gen_##name(DisasContext *ctx)  \
{                                          \
    gen_conditional_store(ctx, memop);     \
}

STCX(stbcx_, DEF_MEMOP(MO_UB))
STCX(sthcx_, DEF_MEMOP(MO_UW))
STCX(stwcx_, DEF_MEMOP(MO_UL))

#if defined(TARGET_PPC64)
/* ldarx */
LARX(ldarx, DEF_MEMOP(MO_UQ))
/* stdcx. */
STCX(stdcx_, DEF_MEMOP(MO_UQ))
3797
/* lqarx */
/*
 * Quadword load-and-reserve: rD must be even and distinct from rA/rB,
 * otherwise the form is invalid.  Performs a single aligned 128-bit
 * load and records a 16-byte reservation in reserve_val/reserve_val2.
 */
static void gen_lqarx(DisasContext *ctx)
{
    int rd = rD(ctx->opcode);
    TCGv EA, hi, lo;
    TCGv_i128 t16;

    if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) ||
                 (rd == rB(ctx->opcode)))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    gen_set_access_type(ctx, ACCESS_RES);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);

    /* Note that the low part is always in RD+1, even in LE mode. */
    lo = cpu_gpr[rd + 1];
    hi = cpu_gpr[rd];

    t16 = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(t16, EA, ctx->mem_idx, DEF_MEMOP(MO_128 | MO_ALIGN));
    tcg_gen_extr_i128_i64(lo, hi, t16);

    tcg_gen_mov_tl(cpu_reserve, EA);
    tcg_gen_movi_tl(cpu_reserve_length, 16);
    tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val));
    tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2));
}
3828
/* stqcx. */
/*
 * Quadword store-conditional: rS must be even.  Succeeds only when the
 * 16-byte reservation matches and memory still holds the reserved
 * 128-bit value (checked with a 128-bit cmpxchg).  CR0 is set to
 * [0,0,EQ,SO] and the reservation is always cleared.
 */
static void gen_stqcx_(DisasContext *ctx)
{
    TCGLabel *lfail;
    TCGv EA, t0, t1;
    TCGv cr0;
    TCGv_i128 cmp, val;
    int rs = rS(ctx->opcode);

    if (unlikely(rs & 1)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    lfail = gen_new_label();
    EA = tcg_temp_new();
    cr0 = tcg_temp_new();

    tcg_gen_mov_tl(cr0, cpu_so);
    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, EA);
    tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, 16, lfail);

    cmp = tcg_temp_new_i128();
    val = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(cmp, cpu_reserve_val2, cpu_reserve_val);

    /* Note that the low part is always in RS+1, even in LE mode. */
    tcg_gen_concat_i64_i128(val, cpu_gpr[rs + 1], cpu_gpr[rs]);

    tcg_gen_atomic_cmpxchg_i128(val, cpu_reserve, cmp, val, ctx->mem_idx,
                                DEF_MEMOP(MO_128 | MO_ALIGN));

    /* Success iff the value read back equals the reserved value. */
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_extr_i128_i64(t1, t0, val);

    tcg_gen_xor_tl(t1, t1, cpu_reserve_val2);
    tcg_gen_xor_tl(t0, t0, cpu_reserve_val);
    tcg_gen_or_tl(t0, t0, t1);

    tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, 0);
    tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
    tcg_gen_or_tl(cr0, cr0, t0);

    gen_set_label(lfail);
    tcg_gen_trunc_tl_i32(cpu_crf[0], cr0);
    /* Kill the reservation whether the store succeeded or not. */
    tcg_gen_movi_tl(cpu_reserve, -1);
}
3880 #endif /* defined(TARGET_PPC64) */
3881
/* sync */
static void gen_sync(DisasContext *ctx)
{
    TCGBar bar = TCG_MO_ALL;
    /* L field selects the sync variant: 1 = lwsync, 2 = ptesync. */
    uint32_t l = (ctx->opcode >> 21) & 3;

    if ((l == 1) && (ctx->insns_flags2 & PPC2_MEM_LWSYNC)) {
        /* lwsync orders everything except store-then-load. */
        bar = TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST;
    }

    /*
     * We may need to check for a pending TLB flush.
     *
     * We do this on ptesync (l == 2) on ppc64 and any sync on ppc32.
     *
     * Additionally, this can only happen in kernel mode however so
     * check MSR_PR as well.
     */
    if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) {
        gen_check_tlb_flush(ctx, true);
    }

    tcg_gen_mb(bar | TCG_BAR_SC);
}
3906
/* wait */
/*
 * Decode the WC (and, on v3.1, PL) fields across the several
 * generations of the 'wait' encoding, reject reserved combinations,
 * and halt the CPU for WC=0; other wait classes are valid no-ops.
 */
static void gen_wait(DisasContext *ctx)
{
    uint32_t wc;

    if (ctx->insns_flags & PPC_WAIT) {
        /* v2.03-v2.07 define an older incompatible 'wait' encoding. */

        if (ctx->insns_flags2 & PPC2_PM_ISA206) {
            /* v2.06 introduced the WC field. WC > 0 may be treated as no-op. */
            wc = WC(ctx->opcode);
        } else {
            wc = 0;
        }

    } else if (ctx->insns_flags2 & PPC2_ISA300) {
        /* v3.0 defines a new 'wait' encoding. */
        wc = WC(ctx->opcode);
        if (ctx->insns_flags2 & PPC2_ISA310) {
            uint32_t pl = PL(ctx->opcode);

            /* WC 1,2 may be treated as no-op. WC 3 is reserved. */
            if (wc == 3) {
                gen_invalid(ctx);
                return;
            }

            /* PL 1-3 are reserved. If WC=2 then the insn is treated as noop. */
            if (pl > 0 && wc != 2) {
                gen_invalid(ctx);
                return;
            }

        } else { /* ISA300 */
            /* WC 1-3 are reserved */
            if (wc > 0) {
                gen_invalid(ctx);
                return;
            }
        }

    } else {
        warn_report("wait instruction decoded with wrong ISA flags.");
        gen_invalid(ctx);
        return;
    }

    /*
     * wait without WC field or with WC=0 waits for an exception / interrupt
     * to occur.
     */
    if (wc == 0) {
        TCGv_i32 t0 = tcg_constant_i32(1);
        tcg_gen_st_i32(t0, cpu_env,
                       -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
        /* Stop translation, as the CPU is supposed to sleep from now */
        gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
    }

    /*
     * Other wait types must not just wait until an exception occurs because
     * ignoring their other wake-up conditions could cause a hang.
     *
     * For v2.06 and 2.07, wc=1,2,3 are architected but may be implemented as
     * no-ops.
     *
     * wc=1 and wc=3 explicitly allow the instruction to be treated as a no-op.
     *
     * wc=2 waits for an implementation-specific condition, such could be
     * always true, so it can be implemented as a no-op.
     *
     * For v3.1, wc=1,2 are architected but may be implemented as no-ops.
     *
     * wc=1 (waitrsv) waits for an exception or a reservation to be lost.
     * Reservation-loss may have implementation-specific conditions, so it
     * can be implemented as a no-op.
     *
     * wc=2 waits for an exception or an amount of time to pass. This
     * amount is implementation-specific so it can be implemented as a
     * no-op.
     *
     * ISA v3.1 allows for execution to resume "in the rare case of
     * an implementation-dependent event", so in any case software must
     * not depend on the architected resumption condition to become
     * true, so no-op implementations should be architecturally correct
     * (if suboptimal).
     */
}
3995
3996 #if defined(TARGET_PPC64)
/*
 * Power-management instructions (doze/nap/stop/sleep/rvwinkle):
 * hypervisor-privileged; each calls the pminsn helper with the
 * corresponding PPC_PM_* mode and then ends translation with EXCP_HLT.
 */
static void gen_doze(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    t = tcg_constant_i32(PPC_PM_DOZE);
    gen_helper_pminsn(cpu_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}

static void gen_nap(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    t = tcg_constant_i32(PPC_PM_NAP);
    gen_helper_pminsn(cpu_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}

static void gen_stop(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    t = tcg_constant_i32(PPC_PM_STOP);
    gen_helper_pminsn(cpu_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}

static void gen_sleep(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    t = tcg_constant_i32(PPC_PM_SLEEP);
    gen_helper_pminsn(cpu_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}

static void gen_rvwinkle(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    t = tcg_constant_i32(PPC_PM_RVWINKLE);
    gen_helper_pminsn(cpu_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}
4071 #endif /* #if defined(TARGET_PPC64) */
4072
/* Record nip in the Come-From Address Register when the CPU has one. */
static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip)
{
#if defined(TARGET_PPC64)
    if (ctx->has_cfar) {
        tcg_gen_movi_tl(cpu_cfar, nip);
    }
#endif
}
4081
#if defined(TARGET_PPC64)
/*
 * Add the number of instructions in this TB to the PMU instruction
 * counters.  Emitted at every TB exit, so it is kept as cheap as
 * possible: inline PMC5 arithmetic when only PMC5/6 count, a helper
 * call otherwise.
 */
static void pmu_count_insns(DisasContext *ctx)
{
    /*
     * Do not bother calling the helper if the PMU isn't counting
     * instructions.
     */
    if (!ctx->pmu_insn_cnt) {
        return;
    }

 #if !defined(CONFIG_USER_ONLY)
    TCGLabel *l;
    TCGv t0;

    /*
     * The PMU insns_inc() helper stops the internal PMU timer if a
     * counter overflows happens. In that case, if the guest is
     * running with icount and we do not handle it beforehand,
     * the helper can trigger a 'bad icount read'.
     */
    translator_io_start(&ctx->base);

    /* Avoid helper calls when only PMC5-6 are enabled. */
    if (!ctx->pmc_other) {
        l = gen_new_label();
        t0 = tcg_temp_new();

        gen_load_spr(t0, SPR_POWER_PMC5);
        tcg_gen_addi_tl(t0, t0, ctx->base.num_insns);
        gen_store_spr(SPR_POWER_PMC5, t0);
        /* Check for overflow, if it's enabled */
        if (ctx->mmcr0_pmcjce) {
            tcg_gen_brcondi_tl(TCG_COND_LT, t0, PMC_COUNTER_NEGATIVE_VAL, l);
            gen_helper_handle_pmc5_overflow(cpu_env);
        }

        gen_set_label(l);
    } else {
        gen_helper_insns_inc(cpu_env, tcg_constant_i32(ctx->base.num_insns));
    }
 #else
    /*
     * User mode can read (but not write) PMC5 and start/stop
     * the PMU via MMCR0_FC. In this case just increment
     * PMC5 with base.num_insns.
     */
    TCGv t0 = tcg_temp_new();

    gen_load_spr(t0, SPR_POWER_PMC5);
    tcg_gen_addi_tl(t0, t0, ctx->base.num_insns);
    gen_store_spr(SPR_POWER_PMC5, t0);
 #endif /* #if !defined(CONFIG_USER_ONLY) */
}
#else
/* No PMU instruction counting on 32-bit targets. */
static void pmu_count_insns(DisasContext *ctx)
{
    return;
}
#endif /* #if defined(TARGET_PPC64) */
4142
/* Whether a direct goto_tb chain to dest is permitted from this TB. */
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}
4147
4148 static void gen_lookup_and_goto_ptr(DisasContext *ctx)
4149 {
4150 if (unlikely(ctx->singlestep_enabled)) {
4151 gen_debug_exception(ctx);
4152 } else {
4153 /*
4154 * tcg_gen_lookup_and_goto_ptr will exit the TB if
4155 * CF_NO_GOTO_PTR is set. Count insns now.
4156 */
4157 if (ctx->base.tb->flags & CF_NO_GOTO_PTR) {
4158 pmu_count_insns(ctx);
4159 }
4160
4161 tcg_gen_lookup_and_goto_ptr();
4162 }
4163 }
4164
/*** Branch ***/
/*
 * End the TB with a jump to dest: chain directly via goto_tb when
 * allowed, otherwise fall back to the indirect lookup-and-jump path.
 */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (NARROW_MODE(ctx)) {
        dest = (uint32_t) dest;
    }
    if (use_goto_tb(ctx, dest)) {
        pmu_count_insns(ctx);
        tcg_gen_goto_tb(n);
        /* Instruction addresses are word-aligned; mask the low bits. */
        tcg_gen_movi_tl(cpu_nip, dest & ~3);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_tl(cpu_nip, dest & ~3);
        gen_lookup_and_goto_ptr(ctx);
    }
}
4181
4182 static inline void gen_setlr(DisasContext *ctx, target_ulong nip)
4183 {
4184 if (NARROW_MODE(ctx)) {
4185 nip = (uint32_t)nip;
4186 }
4187 tcg_gen_movi_tl(cpu_lr, nip);
4188 }
4189
4190 /* b ba bl bla */
4191 static void gen_b(DisasContext *ctx)
4192 {
4193 target_ulong li, target;
4194
4195 /* sign extend LI */
4196 li = LI(ctx->opcode);
4197 li = (li ^ 0x02000000) - 0x02000000;
4198 if (likely(AA(ctx->opcode) == 0)) {
4199 target = ctx->cia + li;
4200 } else {
4201 target = li;
4202 }
4203 if (LK(ctx->opcode)) {
4204 gen_setlr(ctx, ctx->base.pc_next);
4205 }
4206 gen_update_cfar(ctx, ctx->cia);
4207 gen_goto_tb(ctx, 0, target);
4208 ctx->base.is_jmp = DISAS_NORETURN;
4209 }
4210
/* Conditional-branch target kinds for gen_bcond(). */
#define BCOND_IM 0
#define BCOND_LR 1
#define BCOND_CTR 2
#define BCOND_TAR 3
4215
/*
 * Common body of the conditional branches (bc/bclr/bcctr/bctar):
 * evaluate the BO-encoded CTR and/or CR conditions, branching to l1
 * (the fall-through path) when the branch is not taken, then emit the
 * taken path to the immediate or register target.
 */
static void gen_bcond(DisasContext *ctx, int type)
{
    uint32_t bo = BO(ctx->opcode);
    TCGLabel *l1;
    TCGv target;

    if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
        /* Snapshot the target register before LK can clobber LR. */
        target = tcg_temp_new();
        if (type == BCOND_CTR) {
            tcg_gen_mov_tl(target, cpu_ctr);
        } else if (type == BCOND_TAR) {
            gen_load_spr(target, SPR_TAR);
        } else {
            tcg_gen_mov_tl(target, cpu_lr);
        }
    } else {
        target = NULL;
    }
    if (LK(ctx->opcode)) {
        gen_setlr(ctx, ctx->base.pc_next);
    }
    l1 = gen_new_label();
    if ((bo & 0x4) == 0) {
        /* Decrement and test CTR */
        TCGv temp = tcg_temp_new();

        if (type == BCOND_CTR) {
            /*
             * All ISAs up to v3 describe this form of bcctr as invalid but
             * some processors, ie. 64-bit server processors compliant with
             * arch 2.x, do implement a "test and decrement" logic instead,
             * as described in their respective UMs. This logic involves CTR
             * to act as both the branch target and a counter, which makes
             * it basically useless and thus never used in real code.
             *
             * This form was hence chosen to trigger extra micro-architectural
             * side-effect on real HW needed for the Spectre v2 workaround.
             * It is up to guests that implement such workaround, ie. linux, to
             * use this form in a way it just triggers the side-effect without
             * doing anything else harmful.
             */
            if (unlikely(!is_book3s_arch2x(ctx))) {
                gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
                return;
            }

            /* Test CTR first, then decrement ("test and decrement"). */
            if (NARROW_MODE(ctx)) {
                tcg_gen_ext32u_tl(temp, cpu_ctr);
            } else {
                tcg_gen_mov_tl(temp, cpu_ctr);
            }
            if (bo & 0x2) {
                tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
            } else {
                tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
            }
            tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
        } else {
            /* Normal form: decrement CTR, then test it. */
            tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
            if (NARROW_MODE(ctx)) {
                tcg_gen_ext32u_tl(temp, cpu_ctr);
            } else {
                tcg_gen_mov_tl(temp, cpu_ctr);
            }
            if (bo & 0x2) {
                tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
            } else {
                tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
            }
        }
    }
    if ((bo & 0x10) == 0) {
        /* Test CR */
        uint32_t bi = BI(ctx->opcode);
        uint32_t mask = 0x08 >> (bi & 0x03);
        TCGv_i32 temp = tcg_temp_new_i32();

        if (bo & 0x8) {
            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
            tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1);
        } else {
            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
            tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1);
        }
    }
    gen_update_cfar(ctx, ctx->cia);
    if (type == BCOND_IM) {
        target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
        if (likely(AA(ctx->opcode) == 0)) {
            gen_goto_tb(ctx, 0, ctx->cia + li);
        } else {
            gen_goto_tb(ctx, 0, li);
        }
    } else {
        if (NARROW_MODE(ctx)) {
            tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
        } else {
            tcg_gen_andi_tl(cpu_nip, target, ~3);
        }
        gen_lookup_and_goto_ptr(ctx);
    }
    if ((bo & 0x14) != 0x14) {
        /* fallthrough case */
        gen_set_label(l1);
        gen_goto_tb(ctx, 1, ctx->base.pc_next);
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}
4324
/* bc: conditional branch to an immediate displacement. */
static void gen_bc(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_IM);
}

/* bcctr: conditional branch to CTR. */
static void gen_bcctr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_CTR);
}

/* bclr: conditional branch to LR. */
static void gen_bclr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_LR);
}

/* bctar: conditional branch to TAR. */
static void gen_bctar(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_TAR);
}
4344
/*** Condition register logical ***/
/*
 * Emit a CR-bit logical instruction: align the crbA and crbB bits to
 * the crbD position within their 4-bit CR fields (via the computed
 * shift "sh"), apply tcg_op, then merge the single result bit back
 * into the crbD field without disturbing its other bits.
 */
#define GEN_CRLOGIC(name, tcg_op, opc)                                        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    uint8_t bitmask;                                                          \
    int sh;                                                                   \
    TCGv_i32 t0, t1;                                                          \
    sh = (crbD(ctx->opcode) & 0x03) - (crbA(ctx->opcode) & 0x03);             \
    t0 = tcg_temp_new_i32();                                                  \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2]);                 \
    t1 = tcg_temp_new_i32();                                                  \
    sh = (crbD(ctx->opcode) & 0x03) - (crbB(ctx->opcode) & 0x03);             \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2]);                 \
    tcg_op(t0, t0, t1);                                                       \
    bitmask = 0x08 >> (crbD(ctx->opcode) & 0x03);                             \
    tcg_gen_andi_i32(t0, t0, bitmask);                                        \
    tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask);          \
    tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1);                  \
}
4374
/*
 * Instantiate the eight CR-bit logical ops.  The third argument is the
 * minor opcode, recorded here for the opcode tables.
 */
/* crand */
GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08);
/* crandc */
GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04);
/* creqv */
GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09);
/* crnand */
GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07);
/* crnor */
GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01);
/* cror */
GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E);
/* crorc */
GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D);
/* crxor */
GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06);
4391
4392 /* mcrf */
/* mcrf: copy one 4-bit CR field (crfS) into another (crfD). */
static void gen_mcrf(DisasContext *ctx)
{
    tcg_gen_mov_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfS(ctx->opcode)]);
}
4397
4398 /*** System linkage ***/
4399
4400 /* rfi (supervisor only) */
/*
 * rfi: return from interrupt (supervisor only).  Restores MSR/NIP via
 * the helper and exits the TB since machine state changes.
 */
static void gen_rfi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /*
     * This instruction doesn't exist anymore on 64-bit server
     * processors compliant with arch 2.x
     */
    if (is_book3s_arch2x(ctx)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    /* Restore CPU state */
    CHK_SV(ctx);
    translator_io_start(&ctx->base);
    gen_update_cfar(ctx, ctx->cia);
    gen_helper_rfi(cpu_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4422
4423 #if defined(TARGET_PPC64)
/* rfid: 64-bit return from interrupt (supervisor only); exits the TB. */
static void gen_rfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_SV(ctx);
    translator_io_start(&ctx->base);
    gen_update_cfar(ctx, ctx->cia);
    gen_helper_rfid(cpu_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4437
4438 #if !defined(CONFIG_USER_ONLY)
/*
 * rfscv: return from system call vectored (supervisor only).
 * NOTE(review): this function is already wrapped in
 * #if !defined(CONFIG_USER_ONLY), so the inner CONFIG_USER_ONLY branch
 * below is dead; kept for symmetry with the sibling rfi/rfid handlers.
 */
static void gen_rfscv(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_SV(ctx);
    translator_io_start(&ctx->base);
    gen_update_cfar(ctx, ctx->cia);
    gen_helper_rfscv(cpu_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4452 #endif
4453
/* hrfid: return from hypervisor interrupt (hypervisor only); exits the TB. */
static void gen_hrfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_HV(ctx);
    gen_helper_hrfid(cpu_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4465 #endif
4466
4467 /* sc */
4468 #if defined(CONFIG_USER_ONLY)
4469 #define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL_USER
4470 #else
4471 #define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL
4472 #define POWERPC_SYSCALL_VECTORED POWERPC_EXCP_SYSCALL_VECTORED
4473 #endif
/* sc: system call — raise the syscall exception with the LEV operand. */
static void gen_sc(DisasContext *ctx)
{
    uint32_t lev;

    /*
     * LEV is a 7-bit field, but the top 6 bits are treated as a reserved
     * field (i.e., ignored). ISA v3.1 changes that to 5 bits, but that is
     * for Ultravisor which TCG does not support, so just ignore the top 6.
     */
    lev = (ctx->opcode >> 5) & 0x1;
    gen_exception_err(ctx, POWERPC_SYSCALL, lev);
}
4486
4487 #if defined(TARGET_PPC64)
4488 #if !defined(CONFIG_USER_ONLY)
/*
 * scv: system call vectored (ISA v3.0+).  The helper raises the
 * exception itself, so NIP must point at the faulting instruction first.
 */
static void gen_scv(DisasContext *ctx)
{
    uint32_t lev = (ctx->opcode >> 5) & 0x7F;

    /* Set the PC back to the faulting instruction. */
    gen_update_nip(ctx, ctx->cia);
    gen_helper_scv(cpu_env, tcg_constant_i32(lev));

    ctx->base.is_jmp = DISAS_NORETURN;
}
4499 #endif
4500 #endif
4501
4502 /*** Trap ***/
4503
4504 /* Check for unconditional traps (always or never) */
4505 static bool check_unconditional_trap(DisasContext *ctx)
4506 {
4507 /* Trap never */
4508 if (TO(ctx->opcode) == 0) {
4509 return true;
4510 }
4511 /* Trap always */
4512 if (TO(ctx->opcode) == 31) {
4513 gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
4514 return true;
4515 }
4516 return false;
4517 }
4518
4519 /* tw */
4520 static void gen_tw(DisasContext *ctx)
4521 {
4522 TCGv_i32 t0;
4523
4524 if (check_unconditional_trap(ctx)) {
4525 return;
4526 }
4527 t0 = tcg_constant_i32(TO(ctx->opcode));
4528 gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
4529 t0);
4530 }
4531
4532 /* twi */
4533 static void gen_twi(DisasContext *ctx)
4534 {
4535 TCGv t0;
4536 TCGv_i32 t1;
4537
4538 if (check_unconditional_trap(ctx)) {
4539 return;
4540 }
4541 t0 = tcg_constant_tl(SIMM(ctx->opcode));
4542 t1 = tcg_constant_i32(TO(ctx->opcode));
4543 gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
4544 }
4545
4546 #if defined(TARGET_PPC64)
4547 /* td */
4548 static void gen_td(DisasContext *ctx)
4549 {
4550 TCGv_i32 t0;
4551
4552 if (check_unconditional_trap(ctx)) {
4553 return;
4554 }
4555 t0 = tcg_constant_i32(TO(ctx->opcode));
4556 gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
4557 t0);
4558 }
4559
4560 /* tdi */
4561 static void gen_tdi(DisasContext *ctx)
4562 {
4563 TCGv t0;
4564 TCGv_i32 t1;
4565
4566 if (check_unconditional_trap(ctx)) {
4567 return;
4568 }
4569 t0 = tcg_constant_tl(SIMM(ctx->opcode));
4570 t1 = tcg_constant_i32(TO(ctx->opcode));
4571 gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
4572 }
4573 #endif
4574
4575 /*** Processor control ***/
4576
4577 /* mcrxr */
/*
 * mcrxr: copy XER[SO, OV, CA] into CR field crfD (bit 0 of the field is
 * left clear) and clear those XER bits.  Note dst aliases cpu_crf[crfD],
 * so it is used as scratch for CA before the final OR.
 */
static void gen_mcrxr(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(t0, cpu_so);
    tcg_gen_trunc_tl_i32(t1, cpu_ov);
    tcg_gen_trunc_tl_i32(dst, cpu_ca);
    /* Assemble SO:OV:CA:0 in the 4-bit CR field layout */
    tcg_gen_shli_i32(t0, t0, 3);
    tcg_gen_shli_i32(t1, t1, 2);
    tcg_gen_shli_i32(dst, dst, 1);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_or_i32(dst, dst, t1);

    /* The copied XER bits are cleared as a side effect */
    tcg_gen_movi_tl(cpu_so, 0);
    tcg_gen_movi_tl(cpu_ov, 0);
    tcg_gen_movi_tl(cpu_ca, 0);
}
4597
4598 #ifdef TARGET_PPC64
4599 /* mcrxrx */
/*
 * mcrxrx (ISA v3.0): copy XER[OV, OV32, CA, CA32] into CR field crfD,
 * laid out as OV:OV32:CA:CA32.  Unlike mcrxr, the XER bits are not
 * cleared.
 */
static void gen_mcrxrx(DisasContext *ctx)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    /* copy OV and OV32 */
    tcg_gen_shli_tl(t0, cpu_ov, 1);
    tcg_gen_or_tl(t0, t0, cpu_ov32);
    tcg_gen_shli_tl(t0, t0, 2);
    /* copy CA and CA32 */
    tcg_gen_shli_tl(t1, cpu_ca, 1);
    tcg_gen_or_tl(t1, t1, cpu_ca32);
    tcg_gen_or_tl(t0, t0, t1);
    tcg_gen_trunc_tl_i32(dst, t0);
}
4616 #endif
4617
4618 /* mfcr mfocrf */
4619 static void gen_mfcr(DisasContext *ctx)
4620 {
4621 uint32_t crm, crn;
4622
4623 if (likely(ctx->opcode & 0x00100000)) {
4624 crm = CRM(ctx->opcode);
4625 if (likely(crm && ((crm & (crm - 1)) == 0))) {
4626 crn = ctz32(crm);
4627 tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]);
4628 tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)],
4629 cpu_gpr[rD(ctx->opcode)], crn * 4);
4630 }
4631 } else {
4632 TCGv_i32 t0 = tcg_temp_new_i32();
4633 tcg_gen_mov_i32(t0, cpu_crf[0]);
4634 tcg_gen_shli_i32(t0, t0, 4);
4635 tcg_gen_or_i32(t0, t0, cpu_crf[1]);
4636 tcg_gen_shli_i32(t0, t0, 4);
4637 tcg_gen_or_i32(t0, t0, cpu_crf[2]);
4638 tcg_gen_shli_i32(t0, t0, 4);
4639 tcg_gen_or_i32(t0, t0, cpu_crf[3]);
4640 tcg_gen_shli_i32(t0, t0, 4);
4641 tcg_gen_or_i32(t0, t0, cpu_crf[4]);
4642 tcg_gen_shli_i32(t0, t0, 4);
4643 tcg_gen_or_i32(t0, t0, cpu_crf[5]);
4644 tcg_gen_shli_i32(t0, t0, 4);
4645 tcg_gen_or_i32(t0, t0, cpu_crf[6]);
4646 tcg_gen_shli_i32(t0, t0, 4);
4647 tcg_gen_or_i32(t0, t0, cpu_crf[7]);
4648 tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
4649 }
4650 }
4651
4652 /* mfmsr */
/* mfmsr: read the MSR into rD (supervisor only). */
static void gen_mfmsr(DisasContext *ctx)
{
    CHK_SV(ctx);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr);
}
4658
4659 /* mfspr */
/*
 * Common translator for mfspr/mftb: dispatch through the per-SPR read
 * callback table, selecting the user/hypervisor/supervisor view by the
 * current privilege level.  Undefined or inaccessible SPRs raise
 * privilege exceptions or act as no-ops per the ISA rules below.
 */
static inline void gen_op_mfspr(DisasContext *ctx)
{
    void (*read_cb)(DisasContext *ctx, int gprn, int sprn);
    uint32_t sprn = SPR(ctx->opcode);

#if defined(CONFIG_USER_ONLY)
    read_cb = ctx->spr_cb[sprn].uea_read;
#else
    if (ctx->pr) {
        read_cb = ctx->spr_cb[sprn].uea_read;
    } else if (ctx->hv) {
        read_cb = ctx->spr_cb[sprn].hea_read;
    } else {
        read_cb = ctx->spr_cb[sprn].oea_read;
    }
#endif
    if (likely(read_cb != NULL)) {
        if (likely(read_cb != SPR_NOACCESS)) {
            (*read_cb)(ctx, rD(ctx->opcode), sprn);
        } else {
            /* Privilege exception */
            /*
             * This is a hack to avoid warnings when running Linux:
             * this OS breaks the PowerPC virtualisation model,
             * allowing userland application to read the PVR
             */
            if (sprn != SPR_PVR) {
                qemu_log_mask(LOG_GUEST_ERROR, "Trying to read privileged spr "
                              "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                              ctx->cia);
            }
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }
        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to read invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);

        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        } else {
            if (ctx->pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        }
    }
}
4720
/* mfspr: move from special purpose register (shared mfspr/mftb path). */
static void gen_mfspr(DisasContext *ctx)
{
    gen_op_mfspr(ctx);
}
4725
4726 /* mftb */
/* mftb: move from time base — same SPR dispatch as mfspr. */
static void gen_mftb(DisasContext *ctx)
{
    gen_op_mfspr(ctx);
}
4731
4732 /* mtcrf mtocrf*/
/*
 * mtcrf / mtocrf: write CR fields from rS.  The mtocrf form (bit 20 set)
 * writes a single field when CRM is one-hot; the mtcrf form writes every
 * field whose CRM bit is set.
 */
static void gen_mtcrf(DisasContext *ctx)
{
    uint32_t crm, crn;

    crm = CRM(ctx->opcode);
    if (likely((ctx->opcode & 0x00100000))) {
        if (crm && ((crm & (crm - 1)) == 0)) {
            TCGv_i32 temp = tcg_temp_new_i32();
            crn = ctz32(crm);
            tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
            tcg_gen_shri_i32(temp, temp, crn * 4);
            tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf);
        }
    } else {
        TCGv_i32 temp = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
        for (crn = 0 ; crn < 8 ; crn++) {
            if (crm & (1 << crn)) {
                tcg_gen_shri_i32(cpu_crf[7 - crn], temp, crn * 4);
                tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf);
            }
        }
    }
}
4757
4758 /* mtmsr */
4759 #if defined(TARGET_PPC64)
/*
 * mtmsrd: 64-bit move to MSR (supervisor only, Book3S arch 2.x).
 * The L=1 form touches only EE and RI; the full form updates everything
 * except HV/S/ME/LE.  Exits the TB since machine state changes.
 */
static void gen_mtmsrd(DisasContext *ctx)
{
    if (unlikely(!is_book3s_arch2x(ctx))) {
        gen_invalid(ctx);
        return;
    }

    CHK_SV(ctx);

#if !defined(CONFIG_USER_ONLY)
    TCGv t0, t1;
    target_ulong mask;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    translator_io_start(&ctx->base);

    if (ctx->opcode & 0x00010000) {
        /* L=1 form only updates EE and RI */
        mask = (1ULL << MSR_RI) | (1ULL << MSR_EE);
    } else {
        /* mtmsrd does not alter HV, S, ME, or LE */
        mask = ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S) |
                 (1ULL << MSR_HV));
        /*
         * XXX: we need to update nip before the store if we enter
         * power saving mode, we will exit the loop directly from
         * ppc_store_msr
         */
        gen_update_nip(ctx, ctx->base.pc_next);
    }

    /* new MSR = (rS & mask) | (old MSR & ~mask) */
    tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_andi_tl(t1, cpu_msr, ~mask);
    tcg_gen_or_tl(t0, t0, t1);

    gen_helper_store_msr(cpu_env, t0);

    /* Must stop the translation as machine state (may have) changed */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif /* !defined(CONFIG_USER_ONLY) */
}
4803 #endif /* defined(TARGET_PPC64) */
4804
/*
 * mtmsr: 32-bit move to MSR (supervisor only).  Only the low 32 MSR
 * bits are writable; L=1 restricts the update to EE and RI.  Exits the
 * TB since machine state changes.
 */
static void gen_mtmsr(DisasContext *ctx)
{
    CHK_SV(ctx);

#if !defined(CONFIG_USER_ONLY)
    TCGv t0, t1;
    target_ulong mask = 0xFFFFFFFF;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    translator_io_start(&ctx->base);
    if (ctx->opcode & 0x00010000) {
        /* L=1 form only updates EE and RI */
        mask &= (1ULL << MSR_RI) | (1ULL << MSR_EE);
    } else {
        /* mtmsr does not alter S, ME, or LE */
        mask &= ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S));

        /*
         * XXX: we need to update nip before the store if we enter
         * power saving mode, we will exit the loop directly from
         * ppc_store_msr
         */
        gen_update_nip(ctx, ctx->base.pc_next);
    }

    /* new MSR = (rS & mask) | (old MSR & ~mask) */
    tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_andi_tl(t1, cpu_msr, ~mask);
    tcg_gen_or_tl(t0, t0, t1);

    gen_helper_store_msr(cpu_env, t0);

    /* Must stop the translation as machine state (may have) changed */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif
}
4842
4843 /* mtspr */
/*
 * mtspr: dispatch through the per-SPR write callback table, selecting
 * the user/hypervisor/supervisor view by privilege level.  Mirrors the
 * read-side rules in gen_op_mfspr for undefined/inaccessible SPRs.
 */
static void gen_mtspr(DisasContext *ctx)
{
    void (*write_cb)(DisasContext *ctx, int sprn, int gprn);
    uint32_t sprn = SPR(ctx->opcode);

#if defined(CONFIG_USER_ONLY)
    write_cb = ctx->spr_cb[sprn].uea_write;
#else
    if (ctx->pr) {
        write_cb = ctx->spr_cb[sprn].uea_write;
    } else if (ctx->hv) {
        write_cb = ctx->spr_cb[sprn].hea_write;
    } else {
        write_cb = ctx->spr_cb[sprn].oea_write;
    }
#endif
    if (likely(write_cb != NULL)) {
        if (likely(write_cb != SPR_NOACCESS)) {
            (*write_cb)(ctx, sprn, rS(ctx->opcode));
        } else {
            /* Privilege exception */
            qemu_log_mask(LOG_GUEST_ERROR, "Trying to write privileged spr "
                          "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                          ctx->cia);
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }

        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to write invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);


        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        } else {
            if (ctx->pr || sprn == 0) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        }
    }
}
4899
4900 #if defined(TARGET_PPC64)
4901 /* setb */
/*
 * setb (ISA v3.0): set rD from CR field crfS — -1 if the LT bit (0x8)
 * is set, 1 if GT (0x4) is set, else 0.  Implemented branch-free: the
 * setcond yields 1 when field >= 4 (GT or higher bit set) and the
 * movcond overrides with -1 when field >= 8 (LT set).
 */
static void gen_setb(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t8 = tcg_constant_i32(8);
    TCGv_i32 tm1 = tcg_constant_i32(-1);
    int crf = crfS(ctx->opcode);

    tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4);
    tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0);
    tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
}
4913 #endif
4914
4915 /*** Cache management ***/
4916
4917 /* dcbf */
/*
 * dcbf: data cache block flush.  Caches are not modelled, so this is
 * emulated as a one-byte load to trigger MMU access checks.
 */
static void gen_dcbf(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_qemu_ld8u(ctx, t0, t0);
}
4927
4928 /* dcbfep (external PID dcbf) */
/*
 * dcbfep: external-PID dcbf (supervisor only).  Emulated as a one-byte
 * load through the EPID translation regime.
 */
static void gen_dcbfep(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    CHK_SV(ctx);
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
}
4939
4940 /* dcbi (Supervisor only) */
/*
 * dcbi: data cache block invalidate (supervisor only).  Emulated as a
 * load followed by a store of the same byte so both read and write MMU
 * permissions are checked.
 */
static void gen_dcbi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv EA, val;

    CHK_SV(ctx);
    EA = tcg_temp_new();
    gen_set_access_type(ctx, ACCESS_CACHE);
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new();
    /* XXX: specification says this should be treated as a store by the MMU */
    gen_qemu_ld8u(ctx, val, EA);
    gen_qemu_st8(ctx, val, EA);
#endif /* defined(CONFIG_USER_ONLY) */
}
4958
/* dcbst */
4960 static void gen_dcbst(DisasContext *ctx)
4961 {
4962 /* XXX: specification say this is treated as a load by the MMU */
4963 TCGv t0;
4964 gen_set_access_type(ctx, ACCESS_CACHE);
4965 t0 = tcg_temp_new();
4966 gen_addr_reg_index(ctx, t0);
4967 gen_qemu_ld8u(ctx, t0, t0);
4968 }
4969
4970 /* dcbstep (dcbstep External PID version) */
/*
 * dcbstep: external-PID dcbst.  Emulated as a one-byte load through the
 * EPID translation regime.
 */
static void gen_dcbstep(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
}
4980
4981 /* dcbt */
/* dcbt: data cache block touch — a prefetch hint, safe to drop. */
static void gen_dcbt(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}
4990
4991 /* dcbtep */
/* dcbtep: external-PID dcbt — a prefetch hint, safe to drop. */
static void gen_dcbtep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}
5000
5001 /* dcbtst */
/* dcbtst: data cache block touch for store — a prefetch hint, safe to drop. */
static void gen_dcbtst(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}
5010
5011 /* dcbtstep */
/* dcbtstep: external-PID dcbtst — a prefetch hint, safe to drop. */
static void gen_dcbtstep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}
5020
5021 /* dcbtls */
/*
 * dcbtls (e500): data cache block touch and lock set.  Cache locking is
 * not implemented, so report failure by setting L1CSR0[CUL] (cache
 * unable to lock).
 */
static void gen_dcbtls(DisasContext *ctx)
{
    /* Always fails locking the cache */
    TCGv t0 = tcg_temp_new();
    gen_load_spr(t0, SPR_Exxx_L1CSR0);
    tcg_gen_ori_tl(t0, t0, L1CSR0_CUL);
    gen_store_spr(SPR_Exxx_L1CSR0, t0);
}
5030
5031 /* dcblc */
/* dcblc (e500): data cache block lock clear — nothing to do, no locks held. */
static void gen_dcblc(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     */
}
5038
5039 /* dcbz */
/*
 * dcbz: zero one data cache block at the computed EA.  The masked
 * opcode bits let the helper distinguish dcbz variants (e.g. dcbzl).
 */
static void gen_dcbz(DisasContext *ctx)
{
    TCGv tcgv_addr;
    TCGv_i32 tcgv_op;

    gen_set_access_type(ctx, ACCESS_CACHE);
    tcgv_addr = tcg_temp_new();
    tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
    gen_addr_reg_index(ctx, tcgv_addr);
    gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op);
}
5051
5052 /* dcbzep */
/* dcbzep: external-PID dcbz — zero a cache block via the EPID regime. */
static void gen_dcbzep(DisasContext *ctx)
{
    TCGv tcgv_addr;
    TCGv_i32 tcgv_op;

    gen_set_access_type(ctx, ACCESS_CACHE);
    tcgv_addr = tcg_temp_new();
    tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
    gen_addr_reg_index(ctx, tcgv_addr);
    gen_helper_dcbzep(cpu_env, tcgv_addr, tcgv_op);
}
5064
5065 /* dst / dstt */
5066 static void gen_dst(DisasContext *ctx)
5067 {
5068 if (rA(ctx->opcode) == 0) {
5069 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5070 } else {
5071 /* interpreted as no-op */
5072 }
5073 }
5074
/* dstst / dststt */
5076 static void gen_dstst(DisasContext *ctx)
5077 {
5078 if (rA(ctx->opcode) == 0) {
5079 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5080 } else {
5081 /* interpreted as no-op */
5082 }
5083
5084 }
5085
5086 /* dss / dssall */
/* dss / dssall: data stream stop — no streams are modelled, so no-op. */
static void gen_dss(DisasContext *ctx)
{
    /* interpreted as no-op */
}
5091
5092 /* icbi */
5093 static void gen_icbi(DisasContext *ctx)
5094 {
5095 TCGv t0;
5096 gen_set_access_type(ctx, ACCESS_CACHE);
5097 t0 = tcg_temp_new();
5098 gen_addr_reg_index(ctx, t0);
5099 gen_helper_icbi(cpu_env, t0);
5100 }
5101
5102 /* icbiep */
5103 static void gen_icbiep(DisasContext *ctx)
5104 {
5105 TCGv t0;
5106 gen_set_access_type(ctx, ACCESS_CACHE);
5107 t0 = tcg_temp_new();
5108 gen_addr_reg_index(ctx, t0);
5109 gen_helper_icbiep(cpu_env, t0);
5110 }
5111
5112 /* Optional: */
5113 /* dcba */
/* dcba: data cache block allocate — a hint, safe to drop. */
static void gen_dcba(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a store by the MMU
     * but does not generate any exception
     */
}
5122
5123 /*** Segment register manipulation ***/
5124 /* Supervisor only: */
5125
5126 /* mfsr */
/* mfsr: read segment register SR (immediate field) into rD (supervisor). */
static void gen_mfsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5139
5140 /* mfsrin */
/*
 * mfsrin: read the segment register selected by rB[32:35] (the top
 * nibble of the 32-bit EA) into rD (supervisor only).
 */
static void gen_mfsrin(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5154
5155 /* mtsr */
/* mtsr: write rS into segment register SR (immediate field, supervisor). */
static void gen_mtsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5168
5169 /* mtsrin */
/*
 * mtsrin: write a GPR into the segment register selected by rB[32:35]
 * (supervisor only).  The source is fetched with rD(); rD and rS decode
 * the same opcode bit field, so the value is identical to rS.
 */
static void gen_mtsrin(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;
    CHK_SV(ctx);

    t0 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rD(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5183
5184 #if defined(TARGET_PPC64)
5185 /* Specific implementation for PowerPC 64 "bridge" emulation using SLB */
5186
5187 /* mfsr */
/* mfsr, PPC64 SLB-bridge variant: read segment register SR into rD. */
static void gen_mfsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5200
5201 /* mfsrin */
/* mfsrin, PPC64 SLB-bridge variant: read SR selected by rB[32:35] into rD. */
static void gen_mfsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5215
5216 /* mtsr */
/* mtsr, PPC64 SLB-bridge variant: write rS into segment register SR. */
static void gen_mtsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5229
5230 /* mtsrin */
/* mtsrin, PPC64 SLB-bridge variant: write rS into SR selected by rB[32:35]. */
static void gen_mtsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5244
5245 #endif /* defined(TARGET_PPC64) */
5246
5247 /*** Lookaside buffer management ***/
5248 /* Optional & supervisor only: */
5249
5250 /* tlbia */
/* tlbia: invalidate all TLB entries (hypervisor privileged). */
static void gen_tlbia(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_HV(ctx);

    gen_helper_tlbia(cpu_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
5261
5262 /* tlbsync */
/*
 * tlbsync: wait for TLB invalidations to complete.  Supervisor
 * privileged when MSR GTSE allows guest TLB management, otherwise
 * hypervisor privileged.  Only BookE needs an actual flush check here.
 */
static void gen_tlbsync(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else

    if (ctx->gtse) {
        CHK_SV(ctx); /* If gtse is set then tlbsync is supervisor privileged */
    } else {
        CHK_HV(ctx); /* Else hypervisor privileged */
    }

    /* BookS does both ptesync and tlbsync make tlbsync a nop for server */
    if (ctx->insns_flags & PPC_BOOKE) {
        gen_check_tlb_flush(ctx, true);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5281
5282 /*** External control ***/
5283 /* Optional: */
5284
5285 /* eciwx */
/*
 * eciwx: external control in word indexed — aligned 32-bit load from
 * the computed EA into rD.  EAR[E] gating is not implemented.
 */
static void gen_eciwx(DisasContext *ctx)
{
    TCGv t0;
    /* Should check EAR[E] ! */
    gen_set_access_type(ctx, ACCESS_EXT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
                       DEF_MEMOP(MO_UL | MO_ALIGN));
}
5296
5297 /* ecowx */
/*
 * ecowx: external control out word indexed — aligned 32-bit store of
 * the source GPR (fetched with rD(); rD and rS are the same bit field)
 * to the computed EA.  EAR[E] gating is not implemented.
 */
static void gen_ecowx(DisasContext *ctx)
{
    TCGv t0;
    /* Should check EAR[E] ! */
    gen_set_access_type(ctx, ACCESS_EXT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_st_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
                       DEF_MEMOP(MO_UL | MO_ALIGN));
}
5308
5309 /* 602 - 603 - G2 TLB management */
5310
5311 /* tlbld */
/* tlbld (602/603/G2): load a data TLB entry for the EA in rB (supervisor). */
static void gen_tlbld_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_6xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5321
5322 /* tlbli */
/* tlbli (602/603/G2): load an instruction TLB entry for the EA in rB. */
static void gen_tlbli_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_6xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5332
5333 /* BookE specific instructions */
5334
5335 /* XXX: not implemented on 440 ? */
/* mfapidi (BookE): not implemented — raise an invalid-instruction error. */
static void gen_mfapidi(DisasContext *ctx)
{
    /* XXX: TODO */
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
5341
5342 /* XXX: not implemented on 440 ? */
5343 static void gen_tlbiva(DisasContext *ctx)
5344 {
5345 #if defined(CONFIG_USER_ONLY)
5346 GEN_PRIV(ctx);
5347 #else
5348 TCGv t0;
5349
5350 CHK_SV(ctx);
5351 t0 = tcg_temp_new();
5352 gen_addr_reg_index(ctx, t0);
5353 gen_helper_tlbiva(cpu_env, cpu_gpr[rB(ctx->opcode)]);
5354 #endif /* defined(CONFIG_USER_ONLY) */
5355 }
5356
5357 /* All 405 MAC instructions are translated here */
5358 static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3,
5359 int ra, int rb, int rt, int Rc)
5360 {
5361 TCGv t0, t1;
5362
5363 t0 = tcg_temp_new();
5364 t1 = tcg_temp_new();
5365
5366 switch (opc3 & 0x0D) {
5367 case 0x05:
5368 /* macchw - macchw. - macchwo - macchwo. */
5369 /* macchws - macchws. - macchwso - macchwso. */
5370 /* nmacchw - nmacchw. - nmacchwo - nmacchwo. */
5371 /* nmacchws - nmacchws. - nmacchwso - nmacchwso. */
5372 /* mulchw - mulchw. */
5373 tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
5374 tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
5375 tcg_gen_ext16s_tl(t1, t1);
5376 break;
5377 case 0x04:
5378 /* macchwu - macchwu. - macchwuo - macchwuo. */
5379 /* macchwsu - macchwsu. - macchwsuo - macchwsuo. */
5380 /* mulchwu - mulchwu. */
5381 tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
5382 tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
5383 tcg_gen_ext16u_tl(t1, t1);
5384 break;
5385 case 0x01:
5386 /* machhw - machhw. - machhwo - machhwo. */
5387 /* machhws - machhws. - machhwso - machhwso. */
5388 /* nmachhw - nmachhw. - nmachhwo - nmachhwo. */
5389 /* nmachhws - nmachhws. - nmachhwso - nmachhwso. */
5390 /* mulhhw - mulhhw. */
5391 tcg_gen_sari_tl(t0, cpu_gpr[ra], 16);
5392 tcg_gen_ext16s_tl(t0, t0);
5393 tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
5394 tcg_gen_ext16s_tl(t1, t1);
5395 break;
5396 case 0x00:
5397 /* machhwu - machhwu. - machhwuo - machhwuo. */
5398 /* machhwsu - machhwsu. - machhwsuo - machhwsuo. */
5399 /* mulhhwu - mulhhwu. */
5400 tcg_gen_shri_tl(t0, cpu_gpr[ra], 16);
5401 tcg_gen_ext16u_tl(t0, t0);
5402 tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
5403 tcg_gen_ext16u_tl(t1, t1);
5404 break;
5405 case 0x0D:
5406 /* maclhw - maclhw. - maclhwo - maclhwo. */
5407 /* maclhws - maclhws. - maclhwso - maclhwso. */
5408 /* nmaclhw - nmaclhw. - nmaclhwo - nmaclhwo. */
5409 /* nmaclhws - nmaclhws. - nmaclhwso - nmaclhwso. */
5410 /* mullhw - mullhw. */
5411 tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
5412 tcg_gen_ext16s_tl(t1, cpu_gpr[rb]);
5413 break;
5414 case 0x0C:
5415 /* maclhwu - maclhwu. - maclhwuo - maclhwuo. */
5416 /* maclhwsu - maclhwsu. - maclhwsuo - maclhwsuo. */
5417 /* mullhwu - mullhwu. */
5418 tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
5419 tcg_gen_ext16u_tl(t1, cpu_gpr[rb]);
5420 break;
5421 }
5422 if (opc2 & 0x04) {
5423 /* (n)multiply-and-accumulate (0x0C / 0x0E) */
5424 tcg_gen_mul_tl(t1, t0, t1);
5425 if (opc2 & 0x02) {
5426 /* nmultiply-and-accumulate (0x0E) */
5427 tcg_gen_sub_tl(t0, cpu_gpr[rt], t1);
5428 } else {
5429 /* multiply-and-accumulate (0x0C) */
5430 tcg_gen_add_tl(t0, cpu_gpr[rt], t1);
5431 }
5432
5433 if (opc3 & 0x12) {
5434 /* Check overflow and/or saturate */
5435 TCGLabel *l1 = gen_new_label();
5436
5437 if (opc3 & 0x10) {
5438 /* Start with XER OV disabled, the most likely case */
5439 tcg_gen_movi_tl(cpu_ov, 0);
5440 }
5441 if (opc3 & 0x01) {
5442 /* Signed */
5443 tcg_gen_xor_tl(t1, cpu_gpr[rt], t1);
5444 tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
5445 tcg_gen_xor_tl(t1, cpu_gpr[rt], t0);
5446 tcg_gen_brcondi_tl(TCG_COND_LT, t1, 0, l1);
5447 if (opc3 & 0x02) {
5448 /* Saturate */
5449 tcg_gen_sari_tl(t0, cpu_gpr[rt], 31);
5450 tcg_gen_xori_tl(t0, t0, 0x7fffffff);
5451 }
5452 } else {
5453 /* Unsigned */
5454 tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
5455 if (opc3 & 0x02) {
5456 /* Saturate */
5457 tcg_gen_movi_tl(t0, UINT32_MAX);
5458 }
5459 }
5460 if (opc3 & 0x10) {
5461 /* Check overflow */
5462 tcg_gen_movi_tl(cpu_ov, 1);
5463 tcg_gen_movi_tl(cpu_so, 1);
5464 }
5465 gen_set_label(l1);
5466 tcg_gen_mov_tl(cpu_gpr[rt], t0);
5467 }
5468 } else {
5469 tcg_gen_mul_tl(cpu_gpr[rt], t0, t1);
5470 }
5471 if (unlikely(Rc) != 0) {
5472 /* Update Rc0 */
5473 gen_set_Rc0(ctx, cpu_gpr[rt]);
5474 }
5475 }
5476
/*
 * Generate one translator per 405 MAC mnemonic by delegating to
 * gen_405_mulladd_insn with the opcode-specific opc2/opc3 pair.
 */
#define GEN_MAC_HANDLER(name, opc2, opc3)                                     \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_405_mulladd_insn(ctx, opc2, opc3, rA(ctx->opcode), rB(ctx->opcode),   \
                         rD(ctx->opcode), Rc(ctx->opcode));                   \
}
5483
/*
 * Instantiate the 405 MAC translators.  opc2/opc3 values are decoded by
 * gen_405_mulladd_insn(); the comment above each line names the base
 * and Rc-setting ('.') mnemonics the handler covers.
 */
/* macchw - macchw. */
GEN_MAC_HANDLER(macchw, 0x0C, 0x05);
/* macchwo - macchwo. */
GEN_MAC_HANDLER(macchwo, 0x0C, 0x15);
/* macchws - macchws. */
GEN_MAC_HANDLER(macchws, 0x0C, 0x07);
/* macchwso - macchwso. */
GEN_MAC_HANDLER(macchwso, 0x0C, 0x17);
/* macchwsu - macchwsu. */
GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06);
/* macchwsuo - macchwsuo. */
GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16);
/* macchwu - macchwu. */
GEN_MAC_HANDLER(macchwu, 0x0C, 0x04);
/* macchwuo - macchwuo. */
GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14);
/* machhw - machhw. */
GEN_MAC_HANDLER(machhw, 0x0C, 0x01);
/* machhwo - machhwo. */
GEN_MAC_HANDLER(machhwo, 0x0C, 0x11);
/* machhws - machhws. */
GEN_MAC_HANDLER(machhws, 0x0C, 0x03);
/* machhwso - machhwso. */
GEN_MAC_HANDLER(machhwso, 0x0C, 0x13);
/* machhwsu - machhwsu. */
GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02);
/* machhwsuo - machhwsuo. */
GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12);
/* machhwu - machhwu. */
GEN_MAC_HANDLER(machhwu, 0x0C, 0x00);
/* machhwuo - machhwuo. */
GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10);
/* maclhw - maclhw. */
GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D);
/* maclhwo - maclhwo. */
GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D);
/* maclhws - maclhws. */
GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F);
/* maclhwso - maclhwso. */
GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F);
/* maclhwu - maclhwu. */
GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C);
/* maclhwuo - maclhwuo. */
GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C);
/* maclhwsu - maclhwsu. */
GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E);
/* maclhwsuo - maclhwsuo. */
GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E);
/* nmacchw - nmacchw. */
GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05);
/* nmacchwo - nmacchwo. */
GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15);
/* nmacchws - nmacchws. */
GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07);
/* nmacchwso - nmacchwso. */
GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17);
/* nmachhw - nmachhw. */
GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01);
/* nmachhwo - nmachhwo. */
GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11);
/* nmachhws - nmachhws. */
GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03);
/* nmachhwso - nmachhwso. */
GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13);
/* nmaclhw - nmaclhw. */
GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D);
/* nmaclhwo - nmaclhwo. */
GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D);
/* nmaclhws - nmaclhws. */
GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F);
/* nmaclhwso - nmaclhwso. */
GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F);

/* mulchw - mulchw. */
GEN_MAC_HANDLER(mulchw, 0x08, 0x05);
/* mulchwu - mulchwu. */
GEN_MAC_HANDLER(mulchwu, 0x08, 0x04);
/* mulhhw - mulhhw. */
GEN_MAC_HANDLER(mulhhw, 0x08, 0x01);
/* mulhhwu - mulhhwu. */
GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00);
/* mullhw - mullhw. */
GEN_MAC_HANDLER(mullhw, 0x08, 0x0D);
/* mullhwu - mullhwu. */
GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C);
5569
/* mfdcr: move from Device Control Register (4xx/BookE); privileged. */
static void gen_mfdcr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv dcrn;

    CHK_SV(ctx);
    /* The DCR number is encoded in the opcode's SPR field. */
    dcrn = tcg_constant_tl(SPR(ctx->opcode));
    gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn);
#endif /* defined(CONFIG_USER_ONLY) */
}
5583
/* mtdcr: move to Device Control Register (4xx/BookE); privileged. */
static void gen_mtdcr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv dcrn;

    CHK_SV(ctx);
    /* The DCR number is encoded in the opcode's SPR field. */
    dcrn = tcg_constant_tl(SPR(ctx->opcode));
    gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5597
/* mfdcrx: indexed form of mfdcr — DCR number comes from GPR[rA]. */
/* XXX: not implemented on 440 ? */
static void gen_mfdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env,
                        cpu_gpr[rA(ctx->opcode)]);
    /* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}
5611
/* mtdcrx: indexed form of mtdcr — DCR number comes from GPR[rA]. */
/* XXX: not implemented on 440 ? */
static void gen_mtdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)],
                         cpu_gpr[rS(ctx->opcode)]);
    /* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}
5625
/*
 * dccci: data cache congruence class invalidate (4xx); privileged.
 * QEMU does not model cache contents, so only the privilege check is done.
 */
static void gen_dccci(DisasContext *ctx)
{
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5632
/*
 * dcread: data cache read (4xx); privileged.
 * Without a cache model, rD simply receives the effective address; the
 * load into 'val' is performed purely for its side effects (MMU
 * translation / exception behavior) and its result is discarded.
 */
static void gen_dcread(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv EA, val;

    CHK_SV(ctx);
    gen_set_access_type(ctx, ACCESS_CACHE);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new();
    gen_qemu_ld32u(ctx, val, EA);   /* access check only; value unused */
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA);
#endif /* defined(CONFIG_USER_ONLY) */
}
5650
/* icbt (40x variant): instruction cache block touch. */
static void gen_icbt_40x(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}
5660
/* iccci: instruction cache congruence class invalidate (4xx); privileged. */
static void gen_iccci(DisasContext *ctx)
{
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5667
/* icread: instruction cache read (4xx); privileged. */
static void gen_icread(DisasContext *ctx)
{
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5674
/* rfci (supervisor only) - 40x variant: return from critical interrupt. */
static void gen_rfci_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_40x_rfci(cpu_env);
    /* MSR may have changed: end the TB and re-evaluate CPU state. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5687
/* rfci: return from critical interrupt (BookE); privileged. */
static void gen_rfci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfci(cpu_env);
    /* MSR may have changed: end the TB and re-evaluate CPU state. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5699
5700 /* BookE specific */
5701
/* rfdi: return from debug interrupt; privileged. */
/* XXX: not implemented on 440 ? */
static void gen_rfdi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfdi(cpu_env);
    /* MSR may have changed: end the TB and re-evaluate CPU state. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5714
/* rfmci: return from machine check interrupt; privileged. */
/* XXX: not implemented on 440 ? */
static void gen_rfmci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfmci(cpu_env);
    /* MSR may have changed: end the TB and re-evaluate CPU state. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5727
5728 /* TLB management - PowerPC 405 implementation */
5729
/*
 * tlbre (40x): TLB read entry; privileged.
 * rB selects which word of the entry to read: 0 = hi word, 1 = lo word;
 * any other value raises an invalid-instruction program exception.
 */
static void gen_tlbre_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    switch (rB(ctx->opcode)) {
    case 0:
        gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], cpu_env,
                                cpu_gpr[rA(ctx->opcode)]);
        break;
    case 1:
        gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], cpu_env,
                                cpu_gpr[rA(ctx->opcode)]);
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5752
/*
 * tlbsx - tlbsx. (40x): TLB search indexed; privileged.
 * rD receives the matching TLB index, or -1 when no entry matches.
 * With Rc set, CR0 is loaded from SO and the EQ bit (0x02) is set
 * only when a match was found (rD != -1).
 */
static void gen_tlbsx_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
    if (Rc(ctx->opcode)) {
        TCGLabel *l1 = gen_new_label();
        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
        /* Skip setting EQ when the search failed (result == -1). */
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
        tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
        gen_set_label(l1);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5774
/*
 * tlbwe (40x): TLB write entry; privileged.
 * rB selects the target word: 0 = hi word, 1 = lo word; other values
 * raise an invalid-instruction program exception.
 */
static void gen_tlbwe_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);

    switch (rB(ctx->opcode)) {
    case 0:
        gen_helper_4xx_tlbwe_hi(cpu_env, cpu_gpr[rA(ctx->opcode)],
                                cpu_gpr[rS(ctx->opcode)]);
        break;
    case 1:
        gen_helper_4xx_tlbwe_lo(cpu_env, cpu_gpr[rA(ctx->opcode)],
                                cpu_gpr[rS(ctx->opcode)]);
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5798
5799 /* TLB management - PowerPC 440 implementation */
5800
/*
 * tlbre (440): TLB read entry; privileged.
 * rB selects which of the three entry words (0..2) to read and is
 * passed through to the helper; other values raise an
 * invalid-instruction program exception.
 */
static void gen_tlbre_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);

    switch (rB(ctx->opcode)) {
    case 0:
    case 1:
    case 2:
        {
            TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
            gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], cpu_env,
                                 t0, cpu_gpr[rA(ctx->opcode)]);
        }
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5825
/*
 * tlbsx - tlbsx. (440): TLB search indexed; privileged.
 * rD receives the matching TLB index, or -1 when no entry matches.
 * With Rc set, CR0 is loaded from SO and EQ (0x02) is set only on a
 * successful match (rD != -1) — same convention as the 40x variant.
 */
static void gen_tlbsx_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
    if (Rc(ctx->opcode)) {
        TCGLabel *l1 = gen_new_label();
        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
        /* Skip setting EQ when the search failed (result == -1). */
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
        tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
        gen_set_label(l1);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5847
/*
 * tlbwe (440): TLB write entry; privileged.
 * rB selects which of the three entry words (0..2) to write and is
 * passed through to the helper; other values raise an
 * invalid-instruction program exception.
 */
static void gen_tlbwe_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    switch (rB(ctx->opcode)) {
    case 0:
    case 1:
    case 2:
        {
            TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
            gen_helper_440_tlbwe(cpu_env, t0, cpu_gpr[rA(ctx->opcode)],
                                 cpu_gpr[rS(ctx->opcode)]);
        }
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5871
5872 /* TLB management - PowerPC BookE 2.06 implementation */
5873
/* tlbre (BookE 2.06): TLB read entry; privileged, fully helper-based. */
static void gen_tlbre_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_booke206_tlbre(cpu_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
5884
/*
 * tlbsx - tlbsx. (BookE 2.06): TLB search; privileged.
 * EA = (rA ? GPR[rA] : 0) + GPR[rB]; with rA == 0, GPR[rB] is used
 * directly without allocating a temporary.
 */
static void gen_tlbsx_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    if (rA(ctx->opcode)) {
        t0 = tcg_temp_new();
        tcg_gen_add_tl(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    } else {
        t0 = cpu_gpr[rB(ctx->opcode)];
    }
    gen_helper_booke206_tlbsx(cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5903
/* tlbwe (BookE 2.06): TLB write entry; privileged, fully helper-based. */
static void gen_tlbwe_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_booke206_tlbwe(cpu_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
5914
/* tlbivax (BookE 2.06): TLB invalidate virtual address indexed; privileged. */
static void gen_tlbivax_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);        /* EA = (rA|0) + rB */
    gen_helper_booke206_tlbivax(cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5928
/*
 * tlbilx (BookE 2.06): TLB invalidate local indexed; privileged.
 * Opcode bits 21..22 select the invalidation type (0, 1 or 3); type 2
 * is reserved and raises an invalid-instruction program exception.
 */
static void gen_tlbilx_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);

    switch ((ctx->opcode >> 21) & 0x3) {
    case 0:
        gen_helper_booke206_tlbilx0(cpu_env, t0);
        break;
    case 1:
        gen_helper_booke206_tlbilx1(cpu_env, t0);
        break;
    case 3:
        gen_helper_booke206_tlbilx3(cpu_env, t0);
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5956
/*
 * wrtee: write MSR[EE] from a register (BookE/4xx); privileged.
 * Splices only the EE bit out of the source GPR into MSR, leaving all
 * other MSR bits untouched.  Note the source register is read via the
 * rD field, which occupies the same opcode bits as rS here.
 */
static void gen_wrtee(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* Isolate the EE bit of the source, clear it in MSR, then merge. */
    tcg_gen_andi_tl(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE));
    tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
    tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
    gen_ppc_maybe_interrupt(ctx);
    /*
     * Stop translation to have a chance to raise an exception if we
     * just set msr_ee to 1
     */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif /* defined(CONFIG_USER_ONLY) */
}
5978
/*
 * wrteei: write MSR[EE] from an immediate bit in the opcode; privileged.
 * Only when enabling EE do we need to end the TB so that a now-pending
 * interrupt can be delivered; disabling EE is safe to continue.
 */
static void gen_wrteei(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    if (ctx->opcode & 0x00008000) {
        tcg_gen_ori_tl(cpu_msr, cpu_msr, (1 << MSR_EE));
        gen_ppc_maybe_interrupt(ctx);
        /* Stop translation to have a chance to raise an exception */
        ctx->base.is_jmp = DISAS_EXIT_UPDATE;
    } else {
        tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5996
5997 /* PowerPC 440 specific instructions */
5998
/*
 * dlmzb: determine leftmost zero byte (440).  The helper does all the
 * work; the Rc flag is passed so it can also update CR0/XER as needed.
 */
static void gen_dlmzb(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_constant_i32(Rc(ctx->opcode));
    gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_env,
                     cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
}
6006
/* mbar replaces eieio on 440 */
static void gen_mbar(DisasContext *ctx)
{
    /* interpreted as no-op */
}
6012
/* msync replaces sync on 440 */
static void gen_msync_4xx(DisasContext *ctx)
{
    /* Only e500 seems to treat reserved bits as invalid */
    if ((ctx->insns_flags2 & PPC2_BOOKE206) &&
        (ctx->opcode & 0x03FFF801)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
    }
    /* otherwise interpreted as no-op */
}
6023
/* icbt (440 variant): instruction cache block touch. */
static void gen_icbt_440(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}
6033
#if defined(TARGET_PPC64)
/* maddld: RT = low 64 bits of (RA * RB) + RC. */
static void gen_maddld(DisasContext *ctx)
{
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mul_i64(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_add_i64(cpu_gpr[rD(ctx->opcode)], t1, cpu_gpr[rC(ctx->opcode)]);
}

/* maddhd maddhdu */
/*
 * RT = high 64 bits of the 128-bit sum (RA * RB) + RC.
 * The Rc opcode bit distinguishes the unsigned form (maddhdu) from the
 * signed form (maddhd); for the signed form, RC must be sign-extended
 * to 128 bits before the add, which is what t1 provides.
 */
static void gen_maddhd_maddhdu(DisasContext *ctx)
{
    TCGv_i64 lo = tcg_temp_new_i64();
    TCGv_i64 hi = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    if (Rc(ctx->opcode)) {
        /* maddhdu: unsigned product; high half of the RC addend is 0 */
        tcg_gen_mulu2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
                          cpu_gpr[rB(ctx->opcode)]);
        tcg_gen_movi_i64(t1, 0);
    } else {
        /* maddhd: signed product; sign-extend RC into the high half */
        tcg_gen_muls2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
                          cpu_gpr[rB(ctx->opcode)]);
        tcg_gen_sari_i64(t1, cpu_gpr[rC(ctx->opcode)], 63);
    }
    /* 128-bit add; RT receives the high doubleword of the result. */
    tcg_gen_add2_i64(t1, cpu_gpr[rD(ctx->opcode)], lo, hi,
                     cpu_gpr[rC(ctx->opcode)], t1);
}
#endif /* defined(TARGET_PPC64) */
6063
/*
 * tbegin: transactional memory begin.  Raises a facility-unavailable
 * exception when TM is disabled; otherwise defers to the helper (which
 * in QEMU always fails the transaction — see GEN_TM_NOOP below).
 */
static void gen_tbegin(DisasContext *ctx)
{
    if (unlikely(!ctx->tm_enabled)) {
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
        return;
    }
    gen_helper_tbegin(cpu_env);
}
6072
/*
 * Stub handlers for the problem-state TM instructions.  Each expands to
 * a handler that checks TM availability and then just clears CR0, which
 * is correct given that tbegin always fails under QEMU.
 */
#define GEN_TM_NOOP(name)                                       \
static inline void gen_##name(DisasContext *ctx)                \
{                                                               \
    if (unlikely(!ctx->tm_enabled)) {                           \
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);    \
        return;                                                 \
    }                                                           \
    /*                                                          \
     * Because tbegin always fails in QEMU, these user          \
     * space instructions all have a simple implementation:     \
     *                                                          \
     *     CR[0] = 0b0 || MSR[TS] || 0b0                        \
     *           = 0b0 || 0b00    || 0b0                        \
     */                                                         \
    tcg_gen_movi_i32(cpu_crf[0], 0);                            \
}

GEN_TM_NOOP(tend);
GEN_TM_NOOP(tabort);
GEN_TM_NOOP(tabortwc);
GEN_TM_NOOP(tabortwci);
GEN_TM_NOOP(tabortdc);
GEN_TM_NOOP(tabortdci);
GEN_TM_NOOP(tsr);
6097
/* cp_abort: nothing to undo since copy/paste is not implemented. */
static inline void gen_cp_abort(DisasContext *ctx)
{
    /* Do Nothing */
}
6102
/* copy/paste raise an invalid-instruction exception until implemented. */
#define GEN_CP_PASTE_NOOP(name)                           \
static inline void gen_##name(DisasContext *ctx)          \
{                                                         \
    /*                                                    \
     * Generate invalid exception until we have an        \
     * implementation of the copy paste facility          \
     */                                                   \
    gen_invalid(ctx);                                     \
}

GEN_CP_PASTE_NOOP(copy)
GEN_CP_PASTE_NOOP(paste)
6115
/* tcheck: transaction check; sets CR[crfD] to "doomed" (LT bit). */
static void gen_tcheck(DisasContext *ctx)
{
    if (unlikely(!ctx->tm_enabled)) {
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
        return;
    }
    /*
     * Because tbegin always fails, the tcheck implementation is
     * simple:
     *
     * CR[CRF] = TDOOMED || MSR[TS] || 0b0
     *         = 0b1 || 0b00 || 0b0
     */
    tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0x8);
}
6131
/*
 * Privileged TM instructions (treclaim/trechkpt): privilege exception in
 * user mode; otherwise the same CR0-clearing stub as GEN_TM_NOOP since
 * tbegin always fails under QEMU.
 */
#if defined(CONFIG_USER_ONLY)
#define GEN_TM_PRIV_NOOP(name)                           \
static inline void gen_##name(DisasContext *ctx)         \
{                                                        \
    gen_priv_opc(ctx);                                   \
}

#else

#define GEN_TM_PRIV_NOOP(name)                                  \
static inline void gen_##name(DisasContext *ctx)                \
{                                                               \
    CHK_SV(ctx);                                                \
    if (unlikely(!ctx->tm_enabled)) {                           \
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);    \
        return;                                                 \
    }                                                           \
    /*                                                          \
     * Because tbegin always fails, the implementation is       \
     * simple:                                                  \
     *                                                          \
     *   CR[0] = 0b0 || MSR[TS] || 0b0                          \
     *         = 0b0 || 0b00 | 0b0                              \
     */                                                         \
    tcg_gen_movi_i32(cpu_crf[0], 0);                            \
}

#endif

GEN_TM_PRIV_NOOP(treclaim);
GEN_TM_PRIV_NOOP(trechkpt);
6163
/* Read FPR[regno] (doubleword 0 of the corresponding VSR) into dst. */
static inline void get_fpr(TCGv_i64 dst, int regno)
{
    tcg_gen_ld_i64(dst, cpu_env, fpr_offset(regno));
}

/* Write src to FPR[regno] and zero doubleword 1 of the VSR. */
static inline void set_fpr(int regno, TCGv_i64 src)
{
    tcg_gen_st_i64(src, cpu_env, fpr_offset(regno));
    /*
     * Before PowerISA v3.1 the result of doubleword 1 of the VSR
     * corresponding to the target FPR was undefined. However,
     * most (if not all) real hardware were setting the result to 0.
     * Starting at ISA v3.1, the result for doubleword 1 is now defined
     * to be 0.
     */
    tcg_gen_st_i64(tcg_constant_i64(0), cpu_env, vsr64_offset(regno, false));
}

/* Read the high (high=true) or low doubleword of AVR[regno] into dst. */
static inline void get_avr64(TCGv_i64 dst, int regno, bool high)
{
    tcg_gen_ld_i64(dst, cpu_env, avr64_offset(regno, high));
}

/* Write src to the high (high=true) or low doubleword of AVR[regno]. */
static inline void set_avr64(int regno, TCGv_i64 src, bool high)
{
    tcg_gen_st_i64(src, cpu_env, avr64_offset(regno, high));
}
6191
/*
 * Helpers for decodetree used by !function for decoding arguments.
 * These scale an encoded immediate field to its byte/element value.
 */
static int times_2(DisasContext *ctx, int x)
{
    return x * 2;
}

static int times_4(DisasContext *ctx, int x)
{
    return x * 4;
}

static int times_16(DisasContext *ctx, int x)
{
    return x * 16;
}

/*
 * Builds a fixed negative doubleword-aligned EA constant with the
 * 6-bit field x inserted at bit 3: 0xfffffffffffffe00 | (x << 3).
 */
static int64_t dw_compose_ea(DisasContext *ctx, int x)
{
    return deposit64(0xfffffffffffffe00, 3, 6, x);
}
6214
/*
 * Helpers for trans_* functions to check for specific insns flags.
 * Use token pasting to ensure that we use the proper flag with the
 * proper variable.
 */
#define REQUIRE_INSNS_FLAGS(CTX, NAME) \
    do {                                                \
        if (((CTX)->insns_flags & PPC_##NAME) == 0) {   \
            return false;                               \
        }                                               \
    } while (0)

#define REQUIRE_INSNS_FLAGS2(CTX, NAME) \
    do {                                                \
        if (((CTX)->insns_flags2 & PPC2_##NAME) == 0) { \
            return false;                               \
        }                                               \
    } while (0)

/* Then special-case the check for 64-bit so that we elide code for ppc32. */
#if TARGET_LONG_BITS == 32
# define REQUIRE_64BIT(CTX)  return false
#else
# define REQUIRE_64BIT(CTX)  REQUIRE_INSNS_FLAGS(CTX, 64B)
#endif

/* Facility checks: raise the matching unavailable exception if disabled. */
#define REQUIRE_VECTOR(CTX) \
    do {                                                \
        if (unlikely(!(CTX)->altivec_enabled)) {        \
            gen_exception((CTX), POWERPC_EXCP_VPU);     \
            return true;                                \
        }                                               \
    } while (0)

#define REQUIRE_VSX(CTX) \
    do {                                                \
        if (unlikely(!(CTX)->vsx_enabled)) {            \
            gen_exception((CTX), POWERPC_EXCP_VSXU);    \
            return true;                                \
        }                                               \
    } while (0)

#define REQUIRE_FPU(ctx) \
    do {                                                \
        if (unlikely(!(ctx)->fpu_enabled)) {            \
            gen_exception((ctx), POWERPC_EXCP_FPU);     \
            return true;                                \
        }                                               \
    } while (0)

/*
 * Privilege checks: supervisor (SV) or hypervisor (HV) state required.
 * In user-only builds these always raise a privilege exception.
 */
#if !defined(CONFIG_USER_ONLY)
#define REQUIRE_SV(CTX)             \
    do {                            \
        if (unlikely((CTX)->pr)) {  \
            gen_priv_opc(CTX);      \
            return true;            \
        }                           \
    } while (0)

#define REQUIRE_HV(CTX)                             \
    do {                                            \
        if (unlikely((CTX)->pr || !(CTX)->hv)) {    \
            gen_priv_opc(CTX);                      \
            return true;                            \
        }                                           \
    } while (0)
#else
#define REQUIRE_SV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
#define REQUIRE_HV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
#endif
6285
/*
 * Helpers for implementing sets of trans_* functions.
 * Defer the implementation of NAME to FUNC, with optional extra arguments.
 * The _FLAGS/_FLAGS2/64 variants prepend the corresponding availability
 * checks before delegating.
 */
#define TRANS(NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    { return FUNC(ctx, a, __VA_ARGS__); }
#define TRANS_FLAGS(FLAGS, NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    {                                                          \
        REQUIRE_INSNS_FLAGS(ctx, FLAGS);                       \
        return FUNC(ctx, a, __VA_ARGS__);                      \
    }
#define TRANS_FLAGS2(FLAGS2, NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    {                                                          \
        REQUIRE_INSNS_FLAGS2(ctx, FLAGS2);                     \
        return FUNC(ctx, a, __VA_ARGS__);                      \
    }

#define TRANS64(NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    { REQUIRE_64BIT(ctx); return FUNC(ctx, a, __VA_ARGS__); }
#define TRANS64_FLAGS2(FLAGS2, NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    {                                                          \
        REQUIRE_64BIT(ctx);                                    \
        REQUIRE_INSNS_FLAGS2(ctx, FLAGS2);                     \
        return FUNC(ctx, a, __VA_ARGS__);                      \
    }

/* TODO: More TRANS* helpers for extra insn_flags checks. */
6316
6317 /* TODO: More TRANS* helpers for extra insn_flags checks. */
6318
6319
6320 #include "decode-insn32.c.inc"
6321 #include "decode-insn64.c.inc"
6322 #include "power8-pmu-regs.c.inc"
6323
/*
 * Incorporate CIA into the constant when R=1.
 * Validate that when R=1, RA=0.
 *
 * Converts a prefixed PLS_D argument set into a plain D argument set,
 * folding the current instruction address into the displacement for
 * PC-relative (R=1) forms.  Returns false (after raising an invalid
 * exception) when R=1 is combined with a non-zero RA.
 */
static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a)
{
    d->rt = a->rt;
    d->ra = a->ra;
    d->si = a->si;
    if (a->r) {
        if (unlikely(a->ra != 0)) {
            gen_invalid(ctx);
            return false;
        }
        d->si += ctx->cia;
    }
    return true;
}
6342
6343 #include "translate/fixedpoint-impl.c.inc"
6344
6345 #include "translate/fp-impl.c.inc"
6346
6347 #include "translate/vmx-impl.c.inc"
6348
6349 #include "translate/vsx-impl.c.inc"
6350
6351 #include "translate/dfp-impl.c.inc"
6352
6353 #include "translate/spe-impl.c.inc"
6354
6355 #include "translate/branch-impl.c.inc"
6356
6357 #include "translate/processor-ctrl-impl.c.inc"
6358
6359 #include "translate/storage-ctrl-impl.c.inc"
6360
6361 /* Handles lfdp */
6362 static void gen_dform39(DisasContext *ctx)
6363 {
6364 if ((ctx->opcode & 0x3) == 0) {
6365 if (ctx->insns_flags2 & PPC2_ISA205) {
6366 return gen_lfdp(ctx);
6367 }
6368 }
6369 return gen_invalid(ctx);
6370 }
6371
6372 /* Handles stfdp */
6373 static void gen_dform3D(DisasContext *ctx)
6374 {
6375 if ((ctx->opcode & 3) == 0) { /* DS-FORM */
6376 /* stfdp */
6377 if (ctx->insns_flags2 & PPC2_ISA205) {
6378 return gen_stfdp(ctx);
6379 }
6380 }
6381 return gen_invalid(ctx);
6382 }
6383
#if defined(TARGET_PPC64)
/* brd: byte-reverse the full doubleword. */
static void gen_brd(DisasContext *ctx)
{
    tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

/*
 * brw: byte-reverse each 32-bit word.  A full 64-bit byte swap reverses
 * the bytes within each word but also swaps the two words; the 32-bit
 * rotate puts the words back in their original order.
 */
static void gen_brw(DisasContext *ctx)
{
    tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    tcg_gen_rotli_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 32);

}

/*
 * brh: byte-reverse each 16-bit halfword by swapping adjacent byte
 * lanes: the odd bytes are shifted down into the even positions (t2)
 * and the even bytes shifted up into the odd positions (t1).
 */
static void gen_brh(DisasContext *ctx)
{
    TCGv_i64 mask = tcg_constant_i64(0x00ff00ff00ff00ffull);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_shri_i64(t1, cpu_gpr[rS(ctx->opcode)], 8);
    tcg_gen_and_i64(t2, t1, mask);
    tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_shli_i64(t1, t1, 8);
    tcg_gen_or_i64(cpu_gpr[rA(ctx->opcode)], t1, t2);
}
#endif
6413
6414 static opcode_t opcodes[] = {
6415 #if defined(TARGET_PPC64)
6416 GEN_HANDLER_E(brd, 0x1F, 0x1B, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA310),
6417 GEN_HANDLER_E(brw, 0x1F, 0x1B, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA310),
6418 GEN_HANDLER_E(brh, 0x1F, 0x1B, 0x06, 0x0000F801, PPC_NONE, PPC2_ISA310),
6419 #endif
6420 GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE),
6421 #if defined(TARGET_PPC64)
6422 GEN_HANDLER_E(cmpeqb, 0x1F, 0x00, 0x07, 0x00600000, PPC_NONE, PPC2_ISA300),
6423 #endif
6424 GEN_HANDLER_E(cmpb, 0x1F, 0x1C, 0x0F, 0x00000001, PPC_NONE, PPC2_ISA205),
6425 GEN_HANDLER_E(cmprb, 0x1F, 0x00, 0x06, 0x00400001, PPC_NONE, PPC2_ISA300),
6426 GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL),
6427 GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6428 GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6429 GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER),
6430 GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER),
6431 GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER),
6432 GEN_HANDLER(mullwo, 0x1F, 0x0B, 0x17, 0x00000000, PPC_INTEGER),
6433 GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6434 #if defined(TARGET_PPC64)
6435 GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B),
6436 #endif
6437 GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER),
6438 GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER),
6439 GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6440 GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6441 GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6442 GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER),
6443 GEN_HANDLER_E(cnttzw, 0x1F, 0x1A, 0x10, 0x00000000, PPC_NONE, PPC2_ISA300),
6444 GEN_HANDLER_E(copy, 0x1F, 0x06, 0x18, 0x03C00001, PPC_NONE, PPC2_ISA300),
6445 GEN_HANDLER_E(cp_abort, 0x1F, 0x06, 0x1A, 0x03FFF801, PPC_NONE, PPC2_ISA300),
6446 GEN_HANDLER_E(paste, 0x1F, 0x06, 0x1C, 0x03C00000, PPC_NONE, PPC2_ISA300),
6447 GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER),
6448 GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER),
6449 GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6450 GEN_HANDLER(oris, 0x19, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6451 GEN_HANDLER(xori, 0x1A, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6452 GEN_HANDLER(xoris, 0x1B, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6453 GEN_HANDLER(popcntb, 0x1F, 0x1A, 0x03, 0x0000F801, PPC_POPCNTB),
6454 GEN_HANDLER(popcntw, 0x1F, 0x1A, 0x0b, 0x0000F801, PPC_POPCNTWD),
6455 GEN_HANDLER_E(prtyw, 0x1F, 0x1A, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA205),
6456 #if defined(TARGET_PPC64)
6457 GEN_HANDLER(popcntd, 0x1F, 0x1A, 0x0F, 0x0000F801, PPC_POPCNTWD),
6458 GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B),
6459 GEN_HANDLER_E(cnttzd, 0x1F, 0x1A, 0x11, 0x00000000, PPC_NONE, PPC2_ISA300),
6460 GEN_HANDLER_E(darn, 0x1F, 0x13, 0x17, 0x001CF801, PPC_NONE, PPC2_ISA300),
6461 GEN_HANDLER_E(prtyd, 0x1F, 0x1A, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA205),
6462 GEN_HANDLER_E(bpermd, 0x1F, 0x1C, 0x07, 0x00000001, PPC_NONE, PPC2_PERM_ISA206),
6463 #endif
6464 GEN_HANDLER(rlwimi, 0x14, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6465 GEN_HANDLER(rlwinm, 0x15, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6466 GEN_HANDLER(rlwnm, 0x17, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6467 GEN_HANDLER(slw, 0x1F, 0x18, 0x00, 0x00000000, PPC_INTEGER),
6468 GEN_HANDLER(sraw, 0x1F, 0x18, 0x18, 0x00000000, PPC_INTEGER),
6469 GEN_HANDLER(srawi, 0x1F, 0x18, 0x19, 0x00000000, PPC_INTEGER),
6470 GEN_HANDLER(srw, 0x1F, 0x18, 0x10, 0x00000000, PPC_INTEGER),
6471 #if defined(TARGET_PPC64)
6472 GEN_HANDLER(sld, 0x1F, 0x1B, 0x00, 0x00000000, PPC_64B),
6473 GEN_HANDLER(srad, 0x1F, 0x1A, 0x18, 0x00000000, PPC_64B),
6474 GEN_HANDLER2(sradi0, "sradi", 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B),
6475 GEN_HANDLER2(sradi1, "sradi", 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B),
6476 GEN_HANDLER(srd, 0x1F, 0x1B, 0x10, 0x00000000, PPC_64B),
6477 GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000,
6478 PPC_NONE, PPC2_ISA300),
6479 GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000,
6480 PPC_NONE, PPC2_ISA300),
6481 #endif
6482 /* handles lfdp, lxsd, lxssp */
6483 GEN_HANDLER_E(dform39, 0x39, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
6484 /* handles stfdp, stxsd, stxssp */
6485 GEN_HANDLER_E(dform3D, 0x3D, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
6486 GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6487 GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6488 GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING),
6489 GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING),
6490 GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING),
6491 GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING),
6492 GEN_HANDLER(eieio, 0x1F, 0x16, 0x1A, 0x01FFF801, PPC_MEM_EIEIO),
6493 GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM),
6494 GEN_HANDLER_E(lbarx, 0x1F, 0x14, 0x01, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
6495 GEN_HANDLER_E(lharx, 0x1F, 0x14, 0x03, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
6496 GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000000, PPC_RES),
6497 GEN_HANDLER_E(lwat, 0x1F, 0x06, 0x12, 0x00000001, PPC_NONE, PPC2_ISA300),
6498 GEN_HANDLER_E(stwat, 0x1F, 0x06, 0x16, 0x00000001, PPC_NONE, PPC2_ISA300),
6499 GEN_HANDLER_E(stbcx_, 0x1F, 0x16, 0x15, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
6500 GEN_HANDLER_E(sthcx_, 0x1F, 0x16, 0x16, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
6501 GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES),
6502 #if defined(TARGET_PPC64)
6503 GEN_HANDLER_E(ldat, 0x1F, 0x06, 0x13, 0x00000001, PPC_NONE, PPC2_ISA300),
6504 GEN_HANDLER_E(stdat, 0x1F, 0x06, 0x17, 0x00000001, PPC_NONE, PPC2_ISA300),
6505 GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000000, PPC_64B),
6506 GEN_HANDLER_E(lqarx, 0x1F, 0x14, 0x08, 0, PPC_NONE, PPC2_LSQ_ISA207),
6507 GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B),
6508 GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207),
6509 #endif
6510 GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC),
6511 /* ISA v3.0 changed the extended opcode from 62 to 30 */
6512 GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x039FF801, PPC_WAIT),
6513 GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039CF801, PPC_NONE, PPC2_ISA300),
6514 GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
6515 GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
6516 GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW),
6517 GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW),
6518 GEN_HANDLER_E(bctar, 0x13, 0x10, 0x11, 0x0000E000, PPC_NONE, PPC2_BCTAR_ISA207),
6519 GEN_HANDLER(mcrf, 0x13, 0x00, 0xFF, 0x00000001, PPC_INTEGER),
6520 GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW),
6521 #if defined(TARGET_PPC64)
6522 GEN_HANDLER(rfid, 0x13, 0x12, 0x00, 0x03FF8001, PPC_64B),
6523 #if !defined(CONFIG_USER_ONLY)
6524 /* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */
6525 GEN_HANDLER_E(scv, 0x11, 0x10, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300),
6526 GEN_HANDLER_E(scv, 0x11, 0x00, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300),
6527 GEN_HANDLER_E(rfscv, 0x13, 0x12, 0x02, 0x03FF8001, PPC_NONE, PPC2_ISA300),
6528 #endif
6529 GEN_HANDLER_E(stop, 0x13, 0x12, 0x0b, 0x03FFF801, PPC_NONE, PPC2_ISA300),
6530 GEN_HANDLER_E(doze, 0x13, 0x12, 0x0c, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
6531 GEN_HANDLER_E(nap, 0x13, 0x12, 0x0d, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
6532 GEN_HANDLER_E(sleep, 0x13, 0x12, 0x0e, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
6533 GEN_HANDLER_E(rvwinkle, 0x13, 0x12, 0x0f, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
6534 GEN_HANDLER(hrfid, 0x13, 0x12, 0x08, 0x03FF8001, PPC_64H),
6535 #endif
6536 /* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */
6537 GEN_HANDLER(sc, 0x11, 0x11, 0xFF, 0x03FFF01D, PPC_FLOW),
6538 GEN_HANDLER(sc, 0x11, 0x01, 0xFF, 0x03FFF01D, PPC_FLOW),
6539 GEN_HANDLER(tw, 0x1F, 0x04, 0x00, 0x00000001, PPC_FLOW),
6540 GEN_HANDLER(twi, 0x03, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
6541 #if defined(TARGET_PPC64)
6542 GEN_HANDLER(td, 0x1F, 0x04, 0x02, 0x00000001, PPC_64B),
6543 GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B),
6544 #endif
6545 GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC),
6546 GEN_HANDLER(mfcr, 0x1F, 0x13, 0x00, 0x00000801, PPC_MISC),
6547 GEN_HANDLER(mfmsr, 0x1F, 0x13, 0x02, 0x001FF801, PPC_MISC),
6548 GEN_HANDLER(mfspr, 0x1F, 0x13, 0x0A, 0x00000001, PPC_MISC),
6549 GEN_HANDLER(mftb, 0x1F, 0x13, 0x0B, 0x00000001, PPC_MFTB),
6550 GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC),
6551 #if defined(TARGET_PPC64)
6552 GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B),
6553 GEN_HANDLER_E(setb, 0x1F, 0x00, 0x04, 0x0003F801, PPC_NONE, PPC2_ISA300),
6554 GEN_HANDLER_E(mcrxrx, 0x1F, 0x00, 0x12, 0x007FF801, PPC_NONE, PPC2_ISA300),
6555 #endif
6556 GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001EF801, PPC_MISC),
6557 GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC),
6558 GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE),
6559 GEN_HANDLER_E(dcbfep, 0x1F, 0x1F, 0x03, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
6560 GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE),
6561 GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE),
6562 GEN_HANDLER_E(dcbstep, 0x1F, 0x1F, 0x01, 0x03E00001, PPC_NONE, PPC2_BOOKE206),
6563 GEN_HANDLER(dcbt, 0x1F, 0x16, 0x08, 0x00000001, PPC_CACHE),
6564 GEN_HANDLER_E(dcbtep, 0x1F, 0x1F, 0x09, 0x00000001, PPC_NONE, PPC2_BOOKE206),
6565 GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x00000001, PPC_CACHE),
6566 GEN_HANDLER_E(dcbtstep, 0x1F, 0x1F, 0x07, 0x00000001, PPC_NONE, PPC2_BOOKE206),
6567 GEN_HANDLER_E(dcbtls, 0x1F, 0x06, 0x05, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
6568 GEN_HANDLER_E(dcblc, 0x1F, 0x06, 0x0c, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
6569 GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZ),
6570 GEN_HANDLER_E(dcbzep, 0x1F, 0x1F, 0x1F, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
6571 GEN_HANDLER(dst, 0x1F, 0x16, 0x0A, 0x01800001, PPC_ALTIVEC),
6572 GEN_HANDLER(dstst, 0x1F, 0x16, 0x0B, 0x01800001, PPC_ALTIVEC),
6573 GEN_HANDLER(dss, 0x1F, 0x16, 0x19, 0x019FF801, PPC_ALTIVEC),
6574 GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE_ICBI),
6575 GEN_HANDLER_E(icbiep, 0x1F, 0x1F, 0x1E, 0x03E00001, PPC_NONE, PPC2_BOOKE206),
6576 GEN_HANDLER(dcba, 0x1F, 0x16, 0x17, 0x03E00001, PPC_CACHE_DCBA),
6577 GEN_HANDLER(mfsr, 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT),
6578 GEN_HANDLER(mfsrin, 0x1F, 0x13, 0x14, 0x001F0001, PPC_SEGMENT),
6579 GEN_HANDLER(mtsr, 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT),
6580 GEN_HANDLER(mtsrin, 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT),
6581 #if defined(TARGET_PPC64)
6582 GEN_HANDLER2(mfsr_64b, "mfsr", 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT_64B),
6583 GEN_HANDLER2(mfsrin_64b, "mfsrin", 0x1F, 0x13, 0x14, 0x001F0001,
6584 PPC_SEGMENT_64B),
6585 GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B),
6586 GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 0x001F0001,
6587 PPC_SEGMENT_64B),
6588 #endif
6589 GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA),
6590 /*
6591 * XXX Those instructions will need to be handled differently for
6592 * different ISA versions
6593 */
6594 GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC),
6595 GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN),
6596 GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN),
6597 GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB),
6598 GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB),
6599 GEN_HANDLER(mfapidi, 0x1F, 0x13, 0x08, 0x0000F801, PPC_MFAPIDI),
6600 GEN_HANDLER(tlbiva, 0x1F, 0x12, 0x18, 0x03FFF801, PPC_TLBIVA),
6601 GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR),
6602 GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR),
6603 GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX),
6604 GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX),
6605 GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON),
6606 GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON),
6607 GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT),
6608 GEN_HANDLER(iccci, 0x1F, 0x06, 0x1E, 0x00000001, PPC_4xx_COMMON),
6609 GEN_HANDLER(icread, 0x1F, 0x06, 0x1F, 0x03E00001, PPC_4xx_COMMON),
6610 GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP),
6611 GEN_HANDLER_E(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE, PPC2_BOOKE206),
6612 GEN_HANDLER(rfdi, 0x13, 0x07, 0x01, 0x03FF8001, PPC_RFDI),
6613 GEN_HANDLER(rfmci, 0x13, 0x06, 0x01, 0x03FF8001, PPC_RFMCI),
6614 GEN_HANDLER2(tlbre_40x, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_40x_TLB),
6615 GEN_HANDLER2(tlbsx_40x, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_40x_TLB),
6616 GEN_HANDLER2(tlbwe_40x, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_40x_TLB),
6617 GEN_HANDLER2(tlbre_440, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_BOOKE),
6618 GEN_HANDLER2(tlbsx_440, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_BOOKE),
6619 GEN_HANDLER2(tlbwe_440, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_BOOKE),
6620 GEN_HANDLER2_E(tlbre_booke206, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001,
6621 PPC_NONE, PPC2_BOOKE206),
6622 GEN_HANDLER2_E(tlbsx_booke206, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000,
6623 PPC_NONE, PPC2_BOOKE206),
6624 GEN_HANDLER2_E(tlbwe_booke206, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001,
6625 PPC_NONE, PPC2_BOOKE206),
6626 GEN_HANDLER2_E(tlbivax_booke206, "tlbivax", 0x1F, 0x12, 0x18, 0x00000001,
6627 PPC_NONE, PPC2_BOOKE206),
6628 GEN_HANDLER2_E(tlbilx_booke206, "tlbilx", 0x1F, 0x12, 0x00, 0x03800001,
6629 PPC_NONE, PPC2_BOOKE206),
6630 GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE),
6631 GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE),
6632 GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC),
6633 GEN_HANDLER_E(mbar, 0x1F, 0x16, 0x1a, 0x001FF801,
6634 PPC_BOOKE, PPC2_BOOKE206),
6635 GEN_HANDLER(msync_4xx, 0x1F, 0x16, 0x12, 0x039FF801, PPC_BOOKE),
6636 GEN_HANDLER2_E(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001,
6637 PPC_BOOKE, PPC2_BOOKE206),
6638 GEN_HANDLER2(icbt_440, "icbt", 0x1F, 0x06, 0x08, 0x03E00001,
6639 PPC_440_SPEC),
6640 GEN_HANDLER(lvsl, 0x1f, 0x06, 0x00, 0x00000001, PPC_ALTIVEC),
6641 GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC),
6642 GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC),
6643 GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC),
6644 #if defined(TARGET_PPC64)
6645 GEN_HANDLER_E(maddhd_maddhdu, 0x04, 0x18, 0xFF, 0x00000000, PPC_NONE,
6646 PPC2_ISA300),
6647 GEN_HANDLER_E(maddld, 0x04, 0x19, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300),
6648 #endif
6649
6650 #undef GEN_INT_ARITH_ADD
6651 #undef GEN_INT_ARITH_ADD_CONST
6652 #define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \
6653 GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x00000000, PPC_INTEGER),
6654 #define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \
6655 add_ca, compute_ca, compute_ov) \
6656 GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x0000F800, PPC_INTEGER),
6657 GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
6658 GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
6659 GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
6660 GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
6661 GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
6662 GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
6663 GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
6664 GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
6665 GEN_HANDLER_E(addex, 0x1F, 0x0A, 0x05, 0x00000000, PPC_NONE, PPC2_ISA300),
6666 GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
6667 GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
6668
6669 #undef GEN_INT_ARITH_DIVW
6670 #define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \
6671 GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER)
6672 GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0),
6673 GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1),
6674 GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0),
6675 GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1),
6676 GEN_HANDLER_E(divwe, 0x1F, 0x0B, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
6677 GEN_HANDLER_E(divweo, 0x1F, 0x0B, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
6678 GEN_HANDLER_E(divweu, 0x1F, 0x0B, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
6679 GEN_HANDLER_E(divweuo, 0x1F, 0x0B, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
6680 GEN_HANDLER_E(modsw, 0x1F, 0x0B, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
6681 GEN_HANDLER_E(moduw, 0x1F, 0x0B, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),
6682
6683 #if defined(TARGET_PPC64)
6684 #undef GEN_INT_ARITH_DIVD
6685 #define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \
6686 GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
6687 GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0),
6688 GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1),
6689 GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0),
6690 GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1),
6691
6692 GEN_HANDLER_E(divdeu, 0x1F, 0x09, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
6693 GEN_HANDLER_E(divdeuo, 0x1F, 0x09, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
6694 GEN_HANDLER_E(divde, 0x1F, 0x09, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
6695 GEN_HANDLER_E(divdeo, 0x1F, 0x09, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
6696 GEN_HANDLER_E(modsd, 0x1F, 0x09, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
6697 GEN_HANDLER_E(modud, 0x1F, 0x09, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),
6698
6699 #undef GEN_INT_ARITH_MUL_HELPER
6700 #define GEN_INT_ARITH_MUL_HELPER(name, opc3) \
6701 GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
6702 GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00),
6703 GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02),
6704 GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17),
6705 #endif
6706
6707 #undef GEN_INT_ARITH_SUBF
6708 #undef GEN_INT_ARITH_SUBF_CONST
6709 #define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \
6710 GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x00000000, PPC_INTEGER),
6711 #define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \
6712 add_ca, compute_ca, compute_ov) \
6713 GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x0000F800, PPC_INTEGER),
6714 GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
6715 GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
6716 GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
6717 GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
6718 GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
6719 GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
6720 GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
6721 GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
6722 GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
6723 GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
6724
6725 #undef GEN_LOGICAL1
6726 #undef GEN_LOGICAL2
6727 #define GEN_LOGICAL2(name, tcg_op, opc, type) \
6728 GEN_HANDLER(name, 0x1F, 0x1C, opc, 0x00000000, type)
6729 #define GEN_LOGICAL1(name, tcg_op, opc, type) \
6730 GEN_HANDLER(name, 0x1F, 0x1A, opc, 0x00000000, type)
6731 GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER),
6732 GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER),
6733 GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER),
6734 GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER),
6735 GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER),
6736 GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER),
6737 GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER),
6738 GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER),
6739 #if defined(TARGET_PPC64)
6740 GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B),
6741 #endif
6742
6743 #if defined(TARGET_PPC64)
6744 #undef GEN_PPC64_R2
6745 #undef GEN_PPC64_R4
6746 #define GEN_PPC64_R2(name, opc1, opc2) \
6747 GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
6748 GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \
6749 PPC_64B)
6750 #define GEN_PPC64_R4(name, opc1, opc2) \
6751 GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
6752 GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x01, 0xFF, 0x00000000, \
6753 PPC_64B), \
6754 GEN_HANDLER2(name##2, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \
6755 PPC_64B), \
6756 GEN_HANDLER2(name##3, stringify(name), opc1, opc2 | 0x11, 0xFF, 0x00000000, \
6757 PPC_64B)
6758 GEN_PPC64_R4(rldicl, 0x1E, 0x00),
6759 GEN_PPC64_R4(rldicr, 0x1E, 0x02),
6760 GEN_PPC64_R4(rldic, 0x1E, 0x04),
6761 GEN_PPC64_R2(rldcl, 0x1E, 0x08),
6762 GEN_PPC64_R2(rldcr, 0x1E, 0x09),
6763 GEN_PPC64_R4(rldimi, 0x1E, 0x06),
6764 #endif
6765
6766 #undef GEN_LDX_E
6767 #define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \
6768 GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000001, type, type2),
6769
6770 #if defined(TARGET_PPC64)
6771 GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)
6772
6773 /* HV/P7 and later only */
6774 GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
6775 GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST)
6776 GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
6777 GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
6778 #endif
6779 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER)
6780 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER)
6781
6782 /* External PID based load */
6783 #undef GEN_LDEPX
6784 #define GEN_LDEPX(name, ldop, opc2, opc3) \
6785 GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3, \
6786 0x00000001, PPC_NONE, PPC2_BOOKE206),
6787
6788 GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
6789 GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
6790 GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
6791 #if defined(TARGET_PPC64)
6792 GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
6793 #endif
6794
6795 #undef GEN_STX_E
6796 #define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk) \
6797 GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000000, type, type2),
6798
6799 #if defined(TARGET_PPC64)
6800 GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
6801 GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
6802 GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
6803 GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
6804 GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
6805 #endif
6806 GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER)
6807 GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER)
6808
6809 #undef GEN_STEPX
6810 #define GEN_STEPX(name, ldop, opc2, opc3) \
6811 GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3, \
6812 0x00000001, PPC_NONE, PPC2_BOOKE206),
6813
6814 GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
6815 GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
6816 GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
6817 #if defined(TARGET_PPC64)
6818 GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1D, 0x04)
6819 #endif
6820
6821 #undef GEN_CRLOGIC
6822 #define GEN_CRLOGIC(name, tcg_op, opc) \
6823 GEN_HANDLER(name, 0x13, 0x01, opc, 0x00000001, PPC_INTEGER)
6824 GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08),
6825 GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04),
6826 GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09),
6827 GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07),
6828 GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01),
6829 GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E),
6830 GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D),
6831 GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06),
6832
6833 #undef GEN_MAC_HANDLER
6834 #define GEN_MAC_HANDLER(name, opc2, opc3) \
6835 GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_405_MAC)
6836 GEN_MAC_HANDLER(macchw, 0x0C, 0x05),
6837 GEN_MAC_HANDLER(macchwo, 0x0C, 0x15),
6838 GEN_MAC_HANDLER(macchws, 0x0C, 0x07),
6839 GEN_MAC_HANDLER(macchwso, 0x0C, 0x17),
6840 GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06),
6841 GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16),
6842 GEN_MAC_HANDLER(macchwu, 0x0C, 0x04),
6843 GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14),
6844 GEN_MAC_HANDLER(machhw, 0x0C, 0x01),
6845 GEN_MAC_HANDLER(machhwo, 0x0C, 0x11),
6846 GEN_MAC_HANDLER(machhws, 0x0C, 0x03),
6847 GEN_MAC_HANDLER(machhwso, 0x0C, 0x13),
6848 GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02),
6849 GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12),
6850 GEN_MAC_HANDLER(machhwu, 0x0C, 0x00),
6851 GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10),
6852 GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D),
6853 GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D),
6854 GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F),
6855 GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F),
6856 GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C),
6857 GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C),
6858 GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E),
6859 GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E),
6860 GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05),
6861 GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15),
6862 GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07),
6863 GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17),
6864 GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01),
6865 GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11),
6866 GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03),
6867 GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13),
6868 GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D),
6869 GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D),
6870 GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F),
6871 GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F),
6872 GEN_MAC_HANDLER(mulchw, 0x08, 0x05),
6873 GEN_MAC_HANDLER(mulchwu, 0x08, 0x04),
6874 GEN_MAC_HANDLER(mulhhw, 0x08, 0x01),
6875 GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00),
6876 GEN_MAC_HANDLER(mullhw, 0x08, 0x0D),
6877 GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C),
6878
6879 GEN_HANDLER2_E(tbegin, "tbegin", 0x1F, 0x0E, 0x14, 0x01DFF800, \
6880 PPC_NONE, PPC2_TM),
6881 GEN_HANDLER2_E(tend, "tend", 0x1F, 0x0E, 0x15, 0x01FFF800, \
6882 PPC_NONE, PPC2_TM),
6883 GEN_HANDLER2_E(tabort, "tabort", 0x1F, 0x0E, 0x1C, 0x03E0F800, \
6884 PPC_NONE, PPC2_TM),
6885 GEN_HANDLER2_E(tabortwc, "tabortwc", 0x1F, 0x0E, 0x18, 0x00000000, \
6886 PPC_NONE, PPC2_TM),
6887 GEN_HANDLER2_E(tabortwci, "tabortwci", 0x1F, 0x0E, 0x1A, 0x00000000, \
6888 PPC_NONE, PPC2_TM),
6889 GEN_HANDLER2_E(tabortdc, "tabortdc", 0x1F, 0x0E, 0x19, 0x00000000, \
6890 PPC_NONE, PPC2_TM),
6891 GEN_HANDLER2_E(tabortdci, "tabortdci", 0x1F, 0x0E, 0x1B, 0x00000000, \
6892 PPC_NONE, PPC2_TM),
6893 GEN_HANDLER2_E(tsr, "tsr", 0x1F, 0x0E, 0x17, 0x03DFF800, \
6894 PPC_NONE, PPC2_TM),
6895 GEN_HANDLER2_E(tcheck, "tcheck", 0x1F, 0x0E, 0x16, 0x007FF800, \
6896 PPC_NONE, PPC2_TM),
6897 GEN_HANDLER2_E(treclaim, "treclaim", 0x1F, 0x0E, 0x1D, 0x03E0F800, \
6898 PPC_NONE, PPC2_TM),
6899 GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \
6900 PPC_NONE, PPC2_TM),
6901
6902 #include "translate/fp-ops.c.inc"
6903
6904 #include "translate/vmx-ops.c.inc"
6905
6906 #include "translate/vsx-ops.c.inc"
6907
6908 #include "translate/spe-ops.c.inc"
6909 };
6910
6911 /*****************************************************************************/
6912 /* Opcode types */
enum {
    PPC_DIRECT = 0, /* Opcode routine */
    PPC_INDIRECT = 1, /* Indirect opcode table */
};

#define PPC_OPCODE_MASK 0x3

/*
 * An opcode-table slot holds either a direct handler pointer or a
 * pointer to a sub-table with PPC_INDIRECT or'ed into its low bits.
 * Report whether this slot is tagged as a sub-table.
 */
static inline int is_indirect_opcode(void *handler)
{
    uintptr_t tag = (uintptr_t)handler & PPC_OPCODE_MASK;

    return tag == PPC_INDIRECT;
}
6924
6925 static inline opc_handler_t **ind_table(void *handler)
6926 {
6927 return (opc_handler_t **)((uintptr_t)handler & ~PPC_OPCODE_MASK);
6928 }
6929
6930 /* Instruction table creation */
6931 /* Opcodes tables creation */
6932 static void fill_new_table(opc_handler_t **table, int len)
6933 {
6934 int i;
6935
6936 for (i = 0; i < len; i++) {
6937 table[i] = &invalid_handler;
6938 }
6939 }
6940
6941 static int create_new_table(opc_handler_t **table, unsigned char idx)
6942 {
6943 opc_handler_t **tmp;
6944
6945 tmp = g_new(opc_handler_t *, PPC_CPU_INDIRECT_OPCODES_LEN);
6946 fill_new_table(tmp, PPC_CPU_INDIRECT_OPCODES_LEN);
6947 table[idx] = (opc_handler_t *)((uintptr_t)tmp | PPC_INDIRECT);
6948
6949 return 0;
6950 }
6951
6952 static int insert_in_table(opc_handler_t **table, unsigned char idx,
6953 opc_handler_t *handler)
6954 {
6955 if (table[idx] != &invalid_handler) {
6956 return -1;
6957 }
6958 table[idx] = handler;
6959
6960 return 0;
6961 }
6962
6963 static int register_direct_insn(opc_handler_t **ppc_opcodes,
6964 unsigned char idx, opc_handler_t *handler)
6965 {
6966 if (insert_in_table(ppc_opcodes, idx, handler) < 0) {
6967 printf("*** ERROR: opcode %02x already assigned in main "
6968 "opcode table\n", idx);
6969 return -1;
6970 }
6971
6972 return 0;
6973 }
6974
/*
 * Ensure table[idx1] is an indirect sub-table (creating it on demand)
 * and, if @handler is non-NULL, install it at slot idx2 of that
 * sub-table.  A NULL @handler only guarantees the sub-table exists,
 * which the multi-level registration helpers rely on.
 *
 * Returns 0 on success, -1 (with a diagnostic on stdout) if idx1 is
 * already taken by a direct handler or idx2 is already assigned.
 */
static int register_ind_in_table(opc_handler_t **table,
                                 unsigned char idx1, unsigned char idx2,
                                 opc_handler_t *handler)
{
    if (table[idx1] == &invalid_handler) {
        /* First use of this slot: allocate the sub-table. */
        if (create_new_table(table, idx1) < 0) {
            printf("*** ERROR: unable to create indirect table "
                   "idx=%02x\n", idx1);
            return -1;
        }
    } else {
        /* Slot in use: it must already be a sub-table, not a handler. */
        if (!is_indirect_opcode(table[idx1])) {
            printf("*** ERROR: idx %02x already assigned to a direct "
                   "opcode\n", idx1);
            return -1;
        }
    }
    if (handler != NULL &&
        insert_in_table(ind_table(table[idx1]), idx2, handler) < 0) {
        printf("*** ERROR: opcode %02x already assigned in "
               "opcode table %02x\n", idx2, idx1);
        return -1;
    }

    return 0;
}
7001
7002 static int register_ind_insn(opc_handler_t **ppc_opcodes,
7003 unsigned char idx1, unsigned char idx2,
7004 opc_handler_t *handler)
7005 {
7006 return register_ind_in_table(ppc_opcodes, idx1, idx2, handler);
7007 }
7008
/*
 * Register a three-key instruction: opc1 selects a first-level
 * sub-table, opc2 a second-level sub-table, and opc3 the handler slot.
 * The first call (handler == NULL) only ensures the opc1 sub-table
 * exists; the second call installs the handler one level down.
 *
 * Returns 0 on success, -1 (with a diagnostic) on any conflict.
 */
static int register_dblind_insn(opc_handler_t **ppc_opcodes,
                                unsigned char idx1, unsigned char idx2,
                                unsigned char idx3, opc_handler_t *handler)
{
    if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
        printf("*** ERROR: unable to join indirect table idx "
               "[%02x-%02x]\n", idx1, idx2);
        return -1;
    }
    if (register_ind_in_table(ind_table(ppc_opcodes[idx1]), idx2, idx3,
                              handler) < 0) {
        printf("*** ERROR: unable to insert opcode "
               "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
        return -1;
    }

    return 0;
}
7027
/*
 * Register a four-key instruction: opc1, opc2 and opc3 each select
 * (creating on demand) a nested sub-table, and opc4 selects the final
 * handler slot.  Each register_ind_in_table() call with handler == NULL
 * only materializes the next table level.
 *
 * Returns 0 on success, -1 (with a diagnostic) on any conflict.
 */
static int register_trplind_insn(opc_handler_t **ppc_opcodes,
                                 unsigned char idx1, unsigned char idx2,
                                 unsigned char idx3, unsigned char idx4,
                                 opc_handler_t *handler)
{
    opc_handler_t **table;

    if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
        printf("*** ERROR: unable to join indirect table idx "
               "[%02x-%02x]\n", idx1, idx2);
        return -1;
    }
    table = ind_table(ppc_opcodes[idx1]);
    if (register_ind_in_table(table, idx2, idx3, NULL) < 0) {
        printf("*** ERROR: unable to join 2nd-level indirect table idx "
               "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
        return -1;
    }
    table = ind_table(table[idx2]);
    if (register_ind_in_table(table, idx3, idx4, handler) < 0) {
        printf("*** ERROR: unable to insert opcode "
               "[%02x-%02x-%02x-%02x]\n", idx1, idx2, idx3, idx4);
        return -1;
    }
    return 0;
}
7054 static int register_insn(opc_handler_t **ppc_opcodes, opcode_t *insn)
7055 {
7056 if (insn->opc2 != 0xFF) {
7057 if (insn->opc3 != 0xFF) {
7058 if (insn->opc4 != 0xFF) {
7059 if (register_trplind_insn(ppc_opcodes, insn->opc1, insn->opc2,
7060 insn->opc3, insn->opc4,
7061 &insn->handler) < 0) {
7062 return -1;
7063 }
7064 } else {
7065 if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2,
7066 insn->opc3, &insn->handler) < 0) {
7067 return -1;
7068 }
7069 }
7070 } else {
7071 if (register_ind_insn(ppc_opcodes, insn->opc1,
7072 insn->opc2, &insn->handler) < 0) {
7073 return -1;
7074 }
7075 }
7076 } else {
7077 if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) {
7078 return -1;
7079 }
7080 }
7081
7082 return 0;
7083 }
7084
7085 static int test_opcode_table(opc_handler_t **table, int len)
7086 {
7087 int i, count, tmp;
7088
7089 for (i = 0, count = 0; i < len; i++) {
7090 /* Consistency fixup */
7091 if (table[i] == NULL) {
7092 table[i] = &invalid_handler;
7093 }
7094 if (table[i] != &invalid_handler) {
7095 if (is_indirect_opcode(table[i])) {
7096 tmp = test_opcode_table(ind_table(table[i]),
7097 PPC_CPU_INDIRECT_OPCODES_LEN);
7098 if (tmp == 0) {
7099 free(table[i]);
7100 table[i] = &invalid_handler;
7101 } else {
7102 count++;
7103 }
7104 } else {
7105 count++;
7106 }
7107 }
7108 }
7109
7110 return count;
7111 }
7112
7113 static void fix_opcode_tables(opc_handler_t **ppc_opcodes)
7114 {
7115 if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) {
7116 printf("*** WARNING: no opcode defined !\n");
7117 }
7118 }
7119
7120 /*****************************************************************************/
7121 void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp)
7122 {
7123 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
7124 opcode_t *opc;
7125
7126 fill_new_table(cpu->opcodes, PPC_CPU_OPCODES_LEN);
7127 for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) {
7128 if (((opc->handler.type & pcc->insns_flags) != 0) ||
7129 ((opc->handler.type2 & pcc->insns_flags2) != 0)) {
7130 if (register_insn(cpu->opcodes, opc) < 0) {
7131 error_setg(errp, "ERROR initializing PowerPC instruction "
7132 "0x%02x 0x%02x 0x%02x", opc->opc1, opc->opc2,
7133 opc->opc3);
7134 return;
7135 }
7136 }
7137 }
7138 fix_opcode_tables(cpu->opcodes);
7139 fflush(stdout);
7140 fflush(stderr);
7141 }
7142
/*
 * Free the indirect sub-tables allocated by create_ppc_opcodes().
 * Tables nest up to three levels (opc1 -> opc2 -> opc3); each indirect
 * entry carries the PPC_INDIRECT tag in its low bits, which must be
 * masked off before the pointer is passed to g_free().
 */
void destroy_ppc_opcodes(PowerPCCPU *cpu)
{
    opc_handler_t **table, **table_2;
    int i, j, k;

    for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) {
        if (cpu->opcodes[i] == &invalid_handler) {
            continue;
        }
        if (is_indirect_opcode(cpu->opcodes[i])) {
            table = ind_table(cpu->opcodes[i]);
            for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) {
                if (table[j] == &invalid_handler) {
                    continue;
                }
                if (is_indirect_opcode(table[j])) {
                    table_2 = ind_table(table[j]);
                    /* Free third-level tables first. */
                    for (k = 0; k < PPC_CPU_INDIRECT_OPCODES_LEN; k++) {
                        if (table_2[k] != &invalid_handler &&
                            is_indirect_opcode(table_2[k])) {
                            g_free((opc_handler_t *)((uintptr_t)table_2[k] &
                                                     ~PPC_INDIRECT));
                        }
                    }
                    /* Then the second-level table itself. */
                    g_free((opc_handler_t *)((uintptr_t)table[j] &
                                             ~PPC_INDIRECT));
                }
            }
            /* Finally the first-level table. */
            g_free((opc_handler_t *)((uintptr_t)cpu->opcodes[i] &
                                     ~PPC_INDIRECT));
        }
    }
}
7176
7177 int ppc_fixup_cpu(PowerPCCPU *cpu)
7178 {
7179 CPUPPCState *env = &cpu->env;
7180
7181 /*
7182 * TCG doesn't (yet) emulate some groups of instructions that are
7183 * implemented on some otherwise supported CPUs (e.g. VSX and
7184 * decimal floating point instructions on POWER7). We remove
7185 * unsupported instruction groups from the cpu state's instruction
7186 * masks and hope the guest can cope. For at least the pseries
7187 * machine, the unavailability of these instructions can be
7188 * advertised to the guest via the device tree.
7189 */
7190 if ((env->insns_flags & ~PPC_TCG_INSNS)
7191 || (env->insns_flags2 & ~PPC_TCG_INSNS2)) {
7192 warn_report("Disabling some instructions which are not "
7193 "emulated by TCG (0x%" PRIx64 ", 0x%" PRIx64 ")",
7194 env->insns_flags & ~PPC_TCG_INSNS,
7195 env->insns_flags2 & ~PPC_TCG_INSNS2);
7196 }
7197 env->insns_flags &= PPC_TCG_INSNS;
7198 env->insns_flags2 &= PPC_TCG_INSNS2;
7199 return 0;
7200 }
7201
/*
 * Decode and translate @insn through the legacy opcode tables.
 * Returns true if a handler was found and emitted code, false if the
 * opcode is invalid/unsupported or has reserved bits set (the caller
 * then raises the invalid-instruction exception).
 */
static bool decode_legacy(PowerPCCPU *cpu, DisasContext *ctx, uint32_t insn)
{
    opc_handler_t **table, *handler;
    uint32_t inval;

    ctx->opcode = insn;

    LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n",
              insn, opc1(insn), opc2(insn), opc3(insn), opc4(insn),
              ctx->le_mode ? "little" : "big");

    /* Walk up to three levels of indirect tables keyed by opc1..opc4. */
    table = cpu->opcodes;
    handler = table[opc1(insn)];
    if (is_indirect_opcode(handler)) {
        table = ind_table(handler);
        handler = table[opc2(insn)];
        if (is_indirect_opcode(handler)) {
            table = ind_table(handler);
            handler = table[opc3(insn)];
            if (is_indirect_opcode(handler)) {
                table = ind_table(handler);
                handler = table[opc4(insn)];
            }
        }
    }

    /* Is opcode *REALLY* valid ? */
    if (unlikely(handler->handler == &gen_invalid)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: "
                      "%02x - %02x - %02x - %02x (%08x) "
                      TARGET_FMT_lx "\n",
                      opc1(insn), opc2(insn), opc3(insn), opc4(insn),
                      insn, ctx->cia);
        return false;
    }

    /* SPE instructions with Rc set use the alternate invalid-bits mask. */
    if (unlikely(handler->type & (PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE)
                 && Rc(insn))) {
        inval = handler->inval2;
    } else {
        inval = handler->inval1;
    }

    /* Reject encodings that set bits reserved for this opcode. */
    if (unlikely((insn & inval) != 0)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: "
                      "%02x - %02x - %02x - %02x (%08x) "
                      TARGET_FMT_lx "\n", insn & inval,
                      opc1(insn), opc2(insn), opc3(insn), opc4(insn),
                      insn, ctx->cia);
        return false;
    }

    handler->handler(ctx);
    return true;
}
7257
/*
 * TranslatorOps.init_disas_context hook: populate the DisasContext
 * from the TB's hflags and the CPU state before translation begins.
 */
static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUPPCState *env = cs->env_ptr;
    /* tb->flags holds the precomputed hflags bit-field for this TB. */
    uint32_t hflags = ctx->base.tb->flags;

    ctx->spr_cb = env->spr_cb;
    /* Privilege / MMU state bits. */
    ctx->pr = (hflags >> HFLAGS_PR) & 1;
    ctx->mem_idx = (hflags >> HFLAGS_DMMU_IDX) & 7;
    ctx->dr = (hflags >> HFLAGS_DR) & 1;
    ctx->hv = (hflags >> HFLAGS_HV) & 1;
    ctx->insns_flags = env->insns_flags;
    ctx->insns_flags2 = env->insns_flags2;
    ctx->access_type = -1;
    ctx->need_access_type = !mmu_is_64bit(env->mmu_model);
    ctx->le_mode = (hflags >> HFLAGS_LE) & 1;
    ctx->default_tcg_memop_mask = ctx->le_mode ? MO_LE : MO_BE;
    ctx->flags = env->flags;
#if defined(TARGET_PPC64)
    ctx->sf_mode = (hflags >> HFLAGS_64) & 1;
    ctx->has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
#endif
    ctx->lazy_tlb_flush = env->mmu_model == POWERPC_MMU_32B
        || env->mmu_model & POWERPC_MMU_64;

    /* Facility-enable bits. */
    ctx->fpu_enabled = (hflags >> HFLAGS_FP) & 1;
    ctx->spe_enabled = (hflags >> HFLAGS_SPE) & 1;
    ctx->altivec_enabled = (hflags >> HFLAGS_VR) & 1;
    ctx->vsx_enabled = (hflags >> HFLAGS_VSX) & 1;
    ctx->tm_enabled = (hflags >> HFLAGS_TM) & 1;
    ctx->gtse = (hflags >> HFLAGS_GTSE) & 1;
    ctx->hr = (hflags >> HFLAGS_HR) & 1;
    /* PMU-related state. */
    ctx->mmcr0_pmcc0 = (hflags >> HFLAGS_PMCC0) & 1;
    ctx->mmcr0_pmcc1 = (hflags >> HFLAGS_PMCC1) & 1;
    ctx->mmcr0_pmcjce = (hflags >> HFLAGS_PMCJCE) & 1;
    ctx->pmc_other = (hflags >> HFLAGS_PMC_OTHER) & 1;
    ctx->pmu_insn_cnt = (hflags >> HFLAGS_INSN_CNT) & 1;

    /* Single-step: limit the TB to one instruction. */
    ctx->singlestep_enabled = 0;
    if ((hflags >> HFLAGS_SE) & 1) {
        ctx->singlestep_enabled |= CPU_SINGLE_STEP;
        ctx->base.max_insns = 1;
    }
    if ((hflags >> HFLAGS_BE) & 1) {
        ctx->singlestep_enabled |= CPU_BRANCH_STEP;
    }
}
7305
/* TranslatorOps.tb_start hook: no PPC-specific work at TB start. */
static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
7309
/*
 * TranslatorOps.insn_start hook: record the guest PC of the
 * instruction about to be translated.
 */
static void ppc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
7314
/*
 * Return true if @insn is the first word of an ISA v3.1 prefixed
 * instruction (primary opcode 1).
 * NOTE(review): REQUIRE_INSNS_FLAGS2 appears to return early from this
 * function when ISA310 is not available, so pre-3.1 CPUs never see
 * prefixed instructions here -- confirm against the macro definition.
 */
static bool is_prefix_insn(DisasContext *ctx, uint32_t insn)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    return opc1(insn) == 1;
}
7320
/*
 * TranslatorOps.translate_insn hook: fetch and translate one
 * (possibly prefixed) instruction.  Non-prefixed words are tried
 * against the decodetree decoder first, then the legacy opcode tables.
 */
static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = cs->env_ptr;
    target_ulong pc;
    uint32_t insn;
    bool ok;

    LOG_DISAS("----------------\n");
    LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
              ctx->base.pc_next, ctx->mem_idx, (int)msr_ir);

    /* ctx->cia is the address of the instruction being translated. */
    ctx->cia = pc = ctx->base.pc_next;
    insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx));
    ctx->base.pc_next = pc += 4;

    if (!is_prefix_insn(ctx, insn)) {
        ok = (decode_insn32(ctx, insn) ||
              decode_legacy(cpu, ctx, insn));
    } else if ((pc & 63) == 0) {
        /*
         * Power v3.1, section 1.9 Exceptions:
         * attempt to execute a prefixed instruction that crosses a
         * 64-byte address boundary (system alignment error).
         */
        gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_INSN);
        ok = true;
    } else {
        /* Fetch the suffix word and decode the 64-bit prefixed insn. */
        uint32_t insn2 = translator_ldl_swap(env, dcbase, pc,
                                             need_byteswap(ctx));
        ctx->base.pc_next = pc += 4;
        ok = decode_insn64(ctx, deposit64(insn2, 32, 32, insn));
    }
    if (!ok) {
        gen_invalid(ctx);
    }

    /* End the TB when crossing a page boundary. */
    if (ctx->base.is_jmp == DISAS_NEXT && !(pc & ~TARGET_PAGE_MASK)) {
        ctx->base.is_jmp = DISAS_TOO_MANY;
    }
}
7364
7365 static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
7366 {
7367 DisasContext *ctx = container_of(dcbase, DisasContext, base);
7368 DisasJumpType is_jmp = ctx->base.is_jmp;
7369 target_ulong nip = ctx->base.pc_next;
7370
7371 if (is_jmp == DISAS_NORETURN) {
7372 /* We have already exited the TB. */
7373 return;
7374 }
7375
7376 /* Honor single stepping. */
7377 if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP)
7378 && (nip <= 0x100 || nip > 0xf00)) {
7379 switch (is_jmp) {
7380 case DISAS_TOO_MANY:
7381 case DISAS_EXIT_UPDATE:
7382 case DISAS_CHAIN_UPDATE:
7383 gen_update_nip(ctx, nip);
7384 break;
7385 case DISAS_EXIT:
7386 case DISAS_CHAIN:
7387 break;
7388 default:
7389 g_assert_not_reached();
7390 }
7391
7392 gen_debug_exception(ctx);
7393 return;
7394 }
7395
7396 switch (is_jmp) {
7397 case DISAS_TOO_MANY:
7398 if (use_goto_tb(ctx, nip)) {
7399 pmu_count_insns(ctx);
7400 tcg_gen_goto_tb(0);
7401 gen_update_nip(ctx, nip);
7402 tcg_gen_exit_tb(ctx->base.tb, 0);
7403 break;
7404 }
7405 /* fall through */
7406 case DISAS_CHAIN_UPDATE:
7407 gen_update_nip(ctx, nip);
7408 /* fall through */
7409 case DISAS_CHAIN:
7410 /*
7411 * tcg_gen_lookup_and_goto_ptr will exit the TB if
7412 * CF_NO_GOTO_PTR is set. Count insns now.
7413 */
7414 if (ctx->base.tb->flags & CF_NO_GOTO_PTR) {
7415 pmu_count_insns(ctx);
7416 }
7417
7418 tcg_gen_lookup_and_goto_ptr();
7419 break;
7420
7421 case DISAS_EXIT_UPDATE:
7422 gen_update_nip(ctx, nip);
7423 /* fall through */
7424 case DISAS_EXIT:
7425 pmu_count_insns(ctx);
7426 tcg_gen_exit_tb(NULL, 0);
7427 break;
7428
7429 default:
7430 g_assert_not_reached();
7431 }
7432 }
7433
7434 static void ppc_tr_disas_log(const DisasContextBase *dcbase,
7435 CPUState *cs, FILE *logfile)
7436 {
7437 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
7438 target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
7439 }
7440
/* Callback table handed to the generic translator loop below. */
static const TranslatorOps ppc_tr_ops = {
    .init_disas_context = ppc_tr_init_disas_context,
    .tb_start           = ppc_tr_tb_start,
    .insn_start         = ppc_tr_insn_start,
    .translate_insn     = ppc_tr_translate_insn,
    .tb_stop            = ppc_tr_tb_stop,
    .disas_log          = ppc_tr_disas_log,
};
7449
7450 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
7451 target_ulong pc, void *host_pc)
7452 {
7453 DisasContext ctx;
7454
7455 translator_loop(cs, tb, max_insns, pc, host_pc, &ppc_tr_ops, &ctx.base);
7456 }