/*
 * PowerPC exception emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "helper_regs.h"

#include "trace.h"

#ifdef CONFIG_TCG
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#endif

/* #define DEBUG_SOFTWARE_TLB */

/*****************************************************************************/
/* Exception processing */
#if !defined(CONFIG_USER_ONLY)

static inline void dump_syscall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
                  " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
                  " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
                  ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
                  ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
                  ppc_dump_gpr(env, 8), env->nip);
}

static inline void dump_hcall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
                  " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
                  " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64
                  " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4),
                  ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6),
                  ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8),
                  ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10),
                  ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12),
                  env->nip);
}

static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
                                target_ulong *msr)
{
    /* We no longer are in a PM state */
    env->resume_as_sreset = false;

    /* Pretend to be returning from doze always as we don't lose state */
    *msr |= SRR1_WS_NOLOSS;

    /* Machine checks are sent normally */
    if (excp == POWERPC_EXCP_MCHECK) {
        return excp;
    }
    switch (excp) {
    case POWERPC_EXCP_RESET:
        *msr |= SRR1_WAKERESET;
        break;
    case POWERPC_EXCP_EXTERNAL:
        *msr |= SRR1_WAKEEE;
        break;
    case POWERPC_EXCP_DECR:
        *msr |= SRR1_WAKEDEC;
        break;
    case POWERPC_EXCP_SDOOR:
        *msr |= SRR1_WAKEDBELL;
        break;
    case POWERPC_EXCP_SDOOR_HV:
        *msr |= SRR1_WAKEHDBELL;
        break;
    case POWERPC_EXCP_HV_MAINT:
        *msr |= SRR1_WAKEHMI;
        break;
    case POWERPC_EXCP_HVIRT:
        *msr |= SRR1_WAKEHVI;
        break;
    default:
        cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
                  excp);
    }
    return POWERPC_EXCP_RESET;
}

/*
 * AIL - Alternate Interrupt Location, a mode that allows interrupts to be
 * taken with the MMU on, and which uses an alternate location (e.g., so the
 * kernel/hv can map the vectors there with an effective address).
 *
 * An interrupt is considered to be taken "with AIL" or "AIL applies" if they
 * are delivered in this way. AIL requires the LPCR to be set to enable this
 * mode, and then a number of conditions have to be true for AIL to apply.
 *
 * First of all, SRESET, MCE, and HMI are always delivered without AIL, because
 * they specifically want to be in real mode (e.g., the MCE might be signaling
 * a SLB multi-hit which requires SLB flush before the MMU can be enabled).
 *
 * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV],
 * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current
 * radix mode (LPCR[HR]).
 *
 * POWER8, POWER9 with LPCR[HR]=0
 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+-------------+---------+-------------+-----+
 * | a         | 00/01/10    | x       | x           | 0   |
 * | a         | 11          | 0       | 1           | 0   |
 * | a         | 11          | 1       | 1           | a   |
 * | a         | 11          | 0       | 0           | a   |
 * +-------------------------------------------------------+
 *
 * POWER9 with LPCR[HR]=1
 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+-------------+---------+-------------+-----+
 * | a         | 00/01/10    | x       | x           | 0   |
 * | a         | 11          | x       | x           | a   |
 * +-------------------------------------------------------+
 *
 * The difference with POWER9 being that MSR[HV] 0->1 interrupts can be sent to
 * the hypervisor in AIL mode if the guest is radix. This is good for
 * performance but allows the guest to influence the AIL of hypervisor
 * interrupts using its MSR, and also the hypervisor must disallow guest
 * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to
 * use AIL for its MSR[HV] 0->1 interrupts.
 *
 * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to
 * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and
 * MSR[HV] 1->1).
 *
 * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1.
 *
 * POWER10 behaviour is
 * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+------------+-------------+---------+-------------+-----+
 * | a         | h          | 00/01/10    | 0       | 0           | 0   |
 * | a         | h          | 11          | 0       | 0           | a   |
 * | a         | h          | x           | 0       | 1           | h   |
 * | a         | h          | 00/01/10    | 1       | 1           | 0   |
 * | a         | h          | 11          | 1       | 1           | h   |
 * +--------------------------------------------------------------------+
 */
static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
                                      target_ulong msr,
                                      target_ulong *new_msr,
                                      target_ulong *vector)
{
#if defined(TARGET_PPC64)
    CPUPPCState *env = &cpu->env;
    bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1);
    bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB);
    int ail = 0;

    if (excp == POWERPC_EXCP_MCHECK ||
        excp == POWERPC_EXCP_RESET ||
        excp == POWERPC_EXCP_HV_MAINT) {
        /* SRESET, MCE, HMI never apply AIL */
        return;
    }

    if (excp_model == POWERPC_EXCP_POWER8 ||
        excp_model == POWERPC_EXCP_POWER9) {
        if (!mmu_all_on) {
            /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
            return;
        }
        if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) {
            /*
             * AIL does not work if there is a MSR[HV] 0->1 transition and the
             * partition is in HPT mode. For radix guests, such interrupts are
             * allowed to be delivered to the hypervisor in ail mode.
             */
            return;
        }

        ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        if (ail == 0) {
            return;
        }
        if (ail == 1) {
            /* AIL=1 is reserved, treat it like AIL=0 */
            return;
        }

    } else if (excp_model == POWERPC_EXCP_POWER10) {
        if (!mmu_all_on && !hv_escalation) {
            /*
             * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
             * Guest->guest and HV->HV interrupts do require MMU on.
             */
            return;
        }

        if (*new_msr & MSR_HVB) {
            if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) {
                /* HV interrupts depend on LPCR[HAIL] */
                return;
            }
            ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
        } else {
            ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        }
        if (ail == 0) {
            return;
        }
        if (ail == 1 || ail == 2) {
            /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
            return;
        }
    } else {
        /* Other processors do not support AIL */
        return;
    }

    /*
     * AIL applies, so the new MSR gets IR and DR set, and an offset applied
     * to the new IP.
     */
    *new_msr |= (1 << MSR_IR) | (1 << MSR_DR);

    if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
        if (ail == 2) {
            *vector |= 0x0000000000018000ull;
        } else if (ail == 3) {
            *vector |= 0xc000000000004000ull;
        }
    } else {
        /*
         * scv AIL is a little different. AIL=2 does not change the address,
         * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
         */
        if (ail == 3) {
            *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */
            *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */
        }
    }
#endif /* defined(TARGET_PPC64) */
}
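
/*
 * Worked example (illustrative only, not used by the code): with the logic
 * in ppc_excp_apply_ail() above, a Data Storage interrupt (base vector
 * 0x300) taken guest->guest on a POWER9 with MSR[IR]=MSR[DR]=1 has its
 * vector ORed with 0x0000000000018000 for LPCR[AIL]=2 and with
 * 0xc000000000004000 for LPCR[AIL]=3, and MSR[IR]/MSR[DR] are forced on in
 * the new MSR. For scv, only AIL=3 changes the address, replacing the
 * 0x17000 base with 0xc000000000003000.
 */
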
static inline void powerpc_set_excp_state(PowerPCCPU *cpu,
                                          target_ulong vector, target_ulong msr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    /*
     * We don't use hreg_store_msr here as already have treated any
     * special case that could occur. Just store MSR and update hflags
     *
     * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
     * will prevent setting of the HV bit which some exceptions might need
     * to do.
     */
    env->msr = msr & env->msr_mask;
    hreg_compute_hflags(env);
    env->nip = vector;
    /* Reset exception state */
    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;

    /* Reset the reservation */
    env->reserve_addr = -1;

    /*
     * Any interrupt is context synchronizing, check if TCG TLB needs
     * a delayed flush on ppc64
     */
    check_tlb_flush(env, false);
}

/*
 * Note that this function should be greatly optimized when called
 * with a constant excp, from ppc_hw_interrupt
 */
static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    target_ulong msr, new_msr, vector;
    int srr0, srr1, asrr0, asrr1, lev = -1;

    qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
                  " => %08x (%02x)\n", env->nip, excp, env->error_code);

    /* new srr1 value excluding must-be-zero bits */
    if (excp_model == POWERPC_EXCP_BOOKE) {
        msr = env->msr;
    } else {
        msr = env->msr & ~0x783f0000ULL;
    }

    /*
     * new interrupt handler msr preserves existing HV and ME unless
     * explicitly overriden
     */
    new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);

    /* target registers */
    srr0 = SPR_SRR0;
    srr1 = SPR_SRR1;
    asrr0 = -1;
    asrr1 = -1;

    /*
     * check for special resume at 0x100 from doze/nap/sleep/winkle on
     * pseries
     */
    if (env->resume_as_sreset) {
        excp = powerpc_reset_wakeup(cs, env, excp, &msr);
    }

    /*
     * Hypervisor emulation assistance interrupt only exists on server
     * arch 2.05 server or later. We also don't want to generate it if
     * we don't have HVB in msr_mask (PAPR mode).
     */
    if (excp == POWERPC_EXCP_HV_EMU
#if defined(TARGET_PPC64)
        && !(mmu_is_64bit(env->mmu_model) && (env->msr_mask & MSR_HVB))
#endif /* defined(TARGET_PPC64) */
        ) {
        excp = POWERPC_EXCP_PROGRAM;
    }

    /*
     * SPEU and VPU share the same IVOR but they exist in different
     * processors. SPEU is e500v1/2 only and VPU is e6500 only.
     */
    if (excp_model == POWERPC_EXCP_BOOKE && excp == POWERPC_EXCP_VPU) {
        excp = POWERPC_EXCP_SPEU;
    }

    switch (excp) {
    case POWERPC_EXCP_NONE:
        /* Should never happen */
        return;
    case POWERPC_EXCP_CRITICAL:    /* Critical input */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        case POWERPC_EXCP_G2:
            break;
        default:
            goto excp_invalid;
        }
        break;
    case POWERPC_EXCP_MCHECK:    /* Machine check exception */
        if (msr_me == 0) {
            /*
             * Machine check exception is not enabled. Enter
             * checkstop state.
             */
            fprintf(stderr, "Machine check while not allowed. "
                    "Entering checkstop state\n");
            if (qemu_log_separate()) {
                qemu_log("Machine check while not allowed. "
                         "Entering checkstop state\n");
            }
            cs->halted = 1;
            cpu_interrupt_exittb(cs);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR).
             */
            new_msr |= (target_ulong)MSR_HVB;
        }

        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);

        /* XXX: should also have something loaded in DAR / DSISR */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_MCSRR0;
            srr1 = SPR_BOOKE_MCSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DSI:       /* Data storage exception */
        trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:       /* Instruction storage exception */
        trace_ppc_excp_isi(msr, env->nip);
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL:  /* External input */
    {
        bool lpes0;

        /*
         * Exception targeting modifiers
         *
         * LPES0 is supported on POWER7/8/9
         * LPES1 is not supported (old iSeries mode)
         *
         * On anything else, we behave as if LPES0 is 1
         * (externals don't alter MSR:HV)
         */
#if defined(TARGET_PPC64)
        if (excp_model == POWERPC_EXCP_POWER7 ||
            excp_model == POWERPC_EXCP_POWER8 ||
            excp_model == POWERPC_EXCP_POWER9 ||
            excp_model == POWERPC_EXCP_POWER10) {
            lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        } else
#endif /* defined(TARGET_PPC64) */
        {
            lpes0 = true;
        }

        if (!lpes0) {
            new_msr |= (target_ulong)MSR_HVB;
            new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
            srr0 = SPR_HSRR0;
            srr1 = SPR_HSRR1;
        }
        if (env->mpic_proxy) {
            /* IACK the IRQ on delivery */
            env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
        }
        break;
    }
    case POWERPC_EXCP_ALIGN:     /* Alignment exception */
        /* Get rS/rD and rA from faulting opcode */
        /*
         * Note: the opcode fields will not be set properly for a
         * direct store load/store, but nobody cares as nobody
         * actually uses direct store segments.
         */
        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        break;
    case POWERPC_EXCP_PROGRAM:   /* Program exception */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
                trace_ppc_excp_fp_ignore();
                cs->exception_index = POWERPC_EXCP_NONE;
                env->error_code = 0;
                return;
            }

            /*
             * FP exceptions always have NIP pointing to the faulting
             * instruction, so always use store_next and claim we are
             * precise in the MSR.
             */
            msr |= 0x00100000;
            env->spr[SPR_BOOKE_ESR] = ESR_FP;
            break;
        case POWERPC_EXCP_INVAL:
            trace_ppc_excp_inval(env->nip);
            msr |= 0x00080000;
            env->spr[SPR_BOOKE_ESR] = ESR_PIL;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            env->spr[SPR_BOOKE_ESR] = ESR_PPR;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            env->spr[SPR_BOOKE_ESR] = ESR_PTR;
            break;
        default:
            /* Should never occur */
            cpu_abort(cs, "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
    case POWERPC_EXCP_SYSCALL:   /* System call exception */
        lev = env->error_code;

        if ((lev == 1) && cpu->vhyp) {
            dump_hcall(env);
        } else {
            dump_syscall(env);
        }

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /* "PAPR mode" built-in hypercall emulation */
        if ((lev == 1) && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            return;
        }
        if (lev == 1) {
            new_msr |= (target_ulong)MSR_HVB;
        }
        break;
    case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception */
        lev = env->error_code;
        dump_syscall(env);
        env->nip += 4;
        new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_FPU:       /* Floating-point unavailable exception */
    case POWERPC_EXCP_APU:       /* Auxiliary processor unavailable */
    case POWERPC_EXCP_DECR:      /* Decrementer exception */
        break;
    case POWERPC_EXCP_FIT:       /* Fixed-interval timer interrupt */
        /* FIT on 4xx */
        trace_ppc_excp_print("FIT");
        break;
    case POWERPC_EXCP_WDT:       /* Watchdog timer interrupt */
        trace_ppc_excp_print("WDT");
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DTLB:      /* Data TLB error */
    case POWERPC_EXCP_ITLB:      /* Instruction TLB error */
        break;
    case POWERPC_EXCP_DEBUG:     /* Debug interrupt */
        if (env->flags & POWERPC_FLAG_DE) {
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_DSRR0;
            srr1 = SPR_BOOKE_DSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            /* DBSR already modified by caller */
        } else {
            cpu_abort(cs, "Debug exception triggered on unsupported model\n");
        }
        break;
    case POWERPC_EXCP_SPEU:      /* SPE/embedded floating-point unavailable/VPU */
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPDI:     /* Embedded floating-point data interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point data exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPRI:     /* Embedded floating-point round interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point round exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EPERFM:    /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DOORI:     /* Embedded doorbell interrupt */
        break;
    case POWERPC_EXCP_DOORCI:    /* Embedded doorbell critical interrupt */
        srr0 = SPR_BOOKE_CSRR0;
        srr1 = SPR_BOOKE_CSRR1;
        break;
    case POWERPC_EXCP_RESET:     /* System reset exception */
        /* A power-saving exception sets ME, otherwise it is unchanged */
        if (msr_pow) {
            /* indicate that we resumed from power save mode */
            msr |= 0x10000;
            new_msr |= ((target_ulong)1 << MSR_ME);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
             */
            new_msr |= (target_ulong)MSR_HVB;
        } else {
            if (msr_pow) {
                cpu_abort(cs, "Trying to deliver power-saving system reset "
                          "exception %d with no HV support\n", excp);
            }
        }
        break;
    case POWERPC_EXCP_DSEG:      /* Data segment exception */
    case POWERPC_EXCP_ISEG:      /* Instruction segment exception */
    case POWERPC_EXCP_TRACE:     /* Trace exception */
        break;
    case POWERPC_EXCP_HISI:      /* Hypervisor instruction storage exception */
        msr |= env->error_code;
        /* fall through */
    case POWERPC_EXCP_HDECR:     /* Hypervisor decrementer exception */
    case POWERPC_EXCP_HDSI:      /* Hypervisor data storage exception */
    case POWERPC_EXCP_HDSEG:     /* Hypervisor data segment exception */
    case POWERPC_EXCP_HISEG:     /* Hypervisor instruction segment exception */
    case POWERPC_EXCP_SDOOR_HV:  /* Hypervisor Doorbell interrupt */
    case POWERPC_EXCP_HV_EMU:
    case POWERPC_EXCP_HVIRT:     /* Hypervisor virtualization */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_VPU:       /* Vector unavailable exception */
    case POWERPC_EXCP_VSXU:      /* VSX unavailable exception */
    case POWERPC_EXCP_FU:        /* Facility unavailable exception */
#ifdef TARGET_PPC64
        env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
#endif
        break;
    case POWERPC_EXCP_HV_FU:     /* Hypervisor Facility Unavailable Exception */
#ifdef TARGET_PPC64
        env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
#endif
        break;
    case POWERPC_EXCP_PIT:       /* Programmable interval timer interrupt */
        trace_ppc_excp_print("PIT");
        break;
    case POWERPC_EXCP_IO:        /* IO error exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_RUNM:      /* Run mode exception */
        /* XXX: TODO */
        cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_EMUL:      /* Emulation trap exception */
        /* XXX: TODO */
        cpu_abort(cs, "602 emulation trap exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IFTLB:     /* Instruction fetch TLB error */
    case POWERPC_EXCP_DLTLB:     /* Data load TLB miss */
    case POWERPC_EXCP_DSTLB:     /* Data store TLB miss */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_G2:
            /* Swap temporary saved registers with GPRs */
            if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
                new_msr |= (target_ulong)1 << MSR_TGPR;
                hreg_swap_gpr_tgpr(env);
            }
            /* fall through */
        case POWERPC_EXCP_7x5:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_IMISS];
                    cmp = &env->spr[SPR_ICMP];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_DMISS];
                    cmp = &env->spr[SPR_DCMP];
                }
                qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->spr[SPR_HASH1], env->spr[SPR_HASH2],
                         env->error_code);
            }
#endif
            msr |= env->crf[0] << 28;
            msr |= env->error_code; /* key, D/I, S/L bits */
            /* Set way using a LRU mechanism */
            msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
            break;
        default:
            cpu_abort(cs, "Invalid TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_FPA:       /* Floating-point assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "Floating point assist exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DABR:      /* Data address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "DABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IABR:      /* Instruction address breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "IABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SMI:       /* System management interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "SMI exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_THERM:     /* Thermal interrupt */
        /* XXX: TODO */
        cpu_abort(cs, "Thermal management exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_PERFM:     /* Embedded performance monitor interrupt */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_VPUA:      /* Vector assist exception */
        /* XXX: TODO */
        cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SOFTP:     /* Soft patch exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 soft-patch exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MAINT:     /* Maintenance exception */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 maintenance exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Maskable external exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_NMEXTBR:   /* Non maskable external breakpoint */
        /* XXX: TODO */
        cpu_abort(cs, "Non maskable external exception "
                  "is not implemented yet !\n");
        break;
    default:
    excp_invalid:
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
        break;
    }

    /* Sanity check */
    if (!(env->msr_mask & MSR_HVB)) {
        if (new_msr & MSR_HVB) {
            cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
                      "no HV support\n", excp);
        }
        if (srr0 == SPR_HSRR0) {
            cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
                      "no HV support\n", excp);
        }
    }

    /*
     * Sort out endianness of interrupt, this differs depending on the
     * CPU, the HV mode, etc...
     */
#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_POWER7) {
        if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER8) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER9 ||
               excp_model == POWERPC_EXCP_POWER10) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#else
    if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#endif

    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(cs, "Raised an exception without defined vector %d\n",
                  excp);
    }

    vector |= env->excp_prefix;

    /* If any alternate SRR register are defined, duplicate saved values */
    if (asrr0 != -1) {
        env->spr[asrr0] = env->nip;
    }
    if (asrr1 != -1) {
        env->spr[asrr1] = msr;
    }

#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_BOOKE) {
        if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
            /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
            new_msr |= (target_ulong)1 << MSR_CM;
        } else {
            vector = (uint32_t)vector;
        }
    } else {
        if (!msr_isf && !mmu_is_64bit(env->mmu_model)) {
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_SF;
        }
    }
#endif

    if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
        /* Save PC */
        env->spr[srr0] = env->nip;

        /* Save MSR */
        env->spr[srr1] = msr;

#if defined(TARGET_PPC64)
    } else {
        vector += lev * 0x20;

        env->lr = env->nip;
        env->ctr = msr;
#endif
    }

    /* This can update new_msr and vector if AIL applies */
    ppc_excp_apply_ail(cpu, excp_model, excp, msr, &new_msr, &vector);

    powerpc_set_excp_state(cpu, vector, new_msr);
}

void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, cs->exception_index);
}
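
/*
 * ppc_hw_interrupt() below walks the pending_interrupts bitmap in a fixed
 * order that acts as the delivery priority: reset and machine check first,
 * then the debug interrupt, the hypervisor-gated HDEC/HVIRT interrupts,
 * the external interrupt with its LPCR[LPES0]/LPCR[HEIC] special cases,
 * the critical input, and finally the interrupts that are only delivered
 * when async_deliver (MSR:EE, or resuming from a power-saving state) is
 * true.
 */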
static void ppc_hw_interrupt(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    bool async_deliver;

    /* External reset */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
        return;
    }
    /* Machine check exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
        return;
    }

    /* External debug exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
        return;
    }

    /*
     * For interrupts that gate on MSR:EE, we need to do something a
     * bit more subtle, as we need to let them through even when EE is
     * clear when coming out of some power management states (in order
     * for them to become a 0x100).
     */
    async_deliver = (msr_ee != 0) || env->resume_as_sreset;

    /* Hypervisor decrementer exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
        /* LPCR will be clear when not supported so this will work */
        bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
        if ((async_deliver || msr_hv == 0) && hdice) {
            /* HDEC clears on delivery */
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
            return;
        }
    }

    /* Hypervisor virtualization interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
        /* LPCR will be clear when not supported so this will work */
        bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
        if ((async_deliver || msr_hv == 0) && hvice) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT);
            return;
        }
    }

    /* External interrupt can ignore MSR:EE under some circumstances */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
        bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
        /* HEIC blocks delivery to the hypervisor */
        if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
            (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
            return;
        }
    }
    if (msr_ce != 0) {
        /* External critical interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
            return;
        }
    }
    if (async_deliver != 0) {
        /* Watchdog timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
            return;
        }
        /* Fixed interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
            return;
        }
        /* Programmable interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
            return;
        }
        /* Decrementer exception */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
            if (ppc_decr_clear_on_delivery(env)) {
                env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
            }
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
            if (is_book3s_arch2x(env)) {
                powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR);
            } else {
                powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
            }
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
            return;
        }
        /* Thermal interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
            return;
        }
    }

    if (env->resume_as_sreset) {
        /*
         * This is a bug ! It means that has_work took us out of halt without
         * anything to deliver while in a PM state that requires getting
         * out via a 0x100
         *
         * This means we will incorrectly execute past the power management
         * instruction instead of triggering a reset.
         *
         * It generally means a discrepancy between the wakeup conditions in the
         * processor has_work implementation and the logic in this function.
         */
        cpu_abort(env_cpu(env),
                  "Wakeup from PM state but interrupt Undelivered");
    }
}

void ppc_cpu_do_system_reset(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
}

void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    target_ulong msr = 0;

    /*
     * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already
     * been set by KVM.
     */
    msr = (1ULL << MSR_ME);
    msr |= env->msr & (1ULL << MSR_SF);
    if (ppc_interrupts_little_endian(cpu)) {
        msr |= (1ULL << MSR_LE);
    }

    powerpc_set_excp_state(cpu, vector, msr);
}

bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    if (interrupt_request & CPU_INTERRUPT_HARD) {
        ppc_hw_interrupt(env);
        if (env->pending_interrupts == 0) {
            cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
        return true;
    }
    return false;
}

#endif /* !CONFIG_USER_ONLY */

/*****************************************************************************/
/* Exceptions processing helpers */

void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}

void raise_exception_err(CPUPPCState *env, uint32_t exception,
                         uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

void raise_exception_ra(CPUPPCState *env, uint32_t exception,
                        uintptr_t raddr)
{
    raise_exception_err_ra(env, exception, 0, raddr);
}

#ifdef CONFIG_TCG
void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}
#endif /* CONFIG_TCG */

#if !defined(CONFIG_USER_ONLY)
#ifdef CONFIG_TCG
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        CPUState *cs = env_cpu(env);
        cpu_interrupt_exittb(cs);
        raise_exception(env, excp);
    }
}

#if defined(TARGET_PPC64)
void helper_scv(CPUPPCState *env, uint32_t lev)
{
    if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
        raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
    } else {
        raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
    }
}

void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
{
    CPUState *cs = env_cpu(env);

    cs->halted = 1;

    /* Condition for waking up at 0x100 */
    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
        (env->spr[SPR_PSSCR] & PSSCR_EC);
}
#endif /* defined(TARGET_PPC64) */
#endif /* CONFIG_TCG */

static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    CPUState *cs = env_cpu(env);

    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

#if defined(TARGET_PPC64)
    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
    trace_ppc_excp_rfi(env->nip, env->msr);
    /*
     * No need to raise an exception here, as rfi is always the last
     * insn of a TB
     */
    cpu_interrupt_exittb(cs);
    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}
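
/*
 * All of the rfi-style helpers below return from an interrupt through
 * do_rfi(), differing only in which register pair supplies the new NIP and
 * MSR: rfi uses SRR0/SRR1 (with the upper 32 bits of SRR1 masked off),
 * rfid and hrfid use SRR0/SRR1 and HSRR0/HSRR1 unmasked, rfscv returns via
 * LR/CTR, and the embedded variants use their critical/debug/machine-check
 * save/restore SPR pairs.
 */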
#ifdef CONFIG_TCG
void helper_rfi(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}

#define MSR_BOOK3S_MASK
#if defined(TARGET_PPC64)
void helper_rfid(CPUPPCState *env)
{
    /*
     * The architecture defines a number of rules for which bits can
     * change but in practice, we handle this in hreg_store_msr()
     * which will be called by do_rfi(), so there is no need to filter
     * here
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}

void helper_rfscv(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr);
}

void helper_hrfid(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
#endif /* defined(TARGET_PPC64) */

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
void helper_rfebb(CPUPPCState *env, target_ulong s)
{
    target_ulong msr = env->msr;

    /*
     * Handling of BESCR bits 32:33 according to PowerISA v3.1:
     *
     * "If BESCR 32:33 != 0b00 the instruction is treated as if
     *  the instruction form were invalid."
     */
    if (env->spr[SPR_BESCR] & BESCR_INVALID) {
        raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                            POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    }

    env->nip = env->spr[SPR_EBBRR];

    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        env->nip = (uint32_t)env->spr[SPR_EBBRR];
    }

    if (s) {
        env->spr[SPR_BESCR] |= BESCR_GE;
    } else {
        env->spr[SPR_BESCR] &= ~BESCR_GE;
    }
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */

/*****************************************************************************/
/* Embedded PowerPC specific helpers */
void helper_40x_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}

void helper_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}

void helper_rfdi(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}

void helper_rfmci(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}
#endif /* CONFIG_TCG */
#endif /* !defined(CONFIG_USER_ONLY) */
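
/*
 * In the trap helpers below, the flags argument carries the TO field of the
 * tw/twi/td/tdi instruction: 0x10 traps on signed "less than", 0x08 on
 * signed "greater than", 0x04 on "equal", 0x02 on unsigned "less than" and
 * 0x01 on unsigned "greater than", matching the comparisons coded below.
 */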
void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}

#if defined(TARGET_PPC64)
void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}
#endif /* defined(TARGET_PPC64) */

#if !defined(CONFIG_USER_ONLY)
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

#ifdef CONFIG_TCG
void helper_rfsvc(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
}

/* Embedded.Processor Control */
static int dbell2irq(target_ulong rb)
{
    int msg = rb & DBELL_TYPE_MASK;
    int irq = -1;

    switch (msg) {
    case DBELL_TYPE_DBELL:
        irq = PPC_INTERRUPT_DOORBELL;
        break;
    case DBELL_TYPE_DBELL_CRIT:
        irq = PPC_INTERRUPT_CDOORBELL;
        break;
    case DBELL_TYPE_G_DBELL:
    case DBELL_TYPE_G_DBELL_CRIT:
    case DBELL_TYPE_G_DBELL_MC:
        /* XXX: Implement */
    default:
        break;
    }

    return irq;
}

void helper_msgclr(CPUPPCState *env, target_ulong rb)
{
    int irq = dbell2irq(rb);

    if (irq < 0) {
        return;
    }

    env->pending_interrupts &= ~(1 << irq);
}

void helper_msgsnd(target_ulong rb)
{
    int irq = dbell2irq(rb);
    int pir = rb & DBELL_PIRTAG_MASK;
    CPUState *cs;

    if (irq < 0) {
        return;
    }

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}

/* Server Processor Control */

static bool dbell_type_server(target_ulong rb)
{
    /*
     * A Directed Hypervisor Doorbell message is sent only if the
     * message type is 5. All other types are reserved and the
     * instruction is a no-op
     */
    return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
}

void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
{
    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
}

static void book3s_msgsnd_common(int pir, int irq)
{
    CPUState *cs;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        /* TODO: broadcast message to all threads of the same processor */
        if (cenv->spr_cb[SPR_PIR].default_value == pir) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}
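
/*
 * msgsnd (book3s_msgsnd below) raises a hypervisor doorbell on every vCPU
 * whose PIR matches the tag in rb, while msgsndp (book3s_msgsndp further
 * down) is restricted to threads of the same subprocessor and therefore
 * only targets the sending thread's own PIR under TCG, which models a
 * single thread per core.
 */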

void helper_book3s_msgsnd(target_ulong rb)
{
    int pir = rb & DBELL_PROCIDTAG_MASK;

    if (!dbell_type_server(rb)) {
        return;
    }

    book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
}

#if defined(TARGET_PPC64)
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
{
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
}

/*
 * sends a message to other threads that are on the same
 * multi-threaded processor
 */
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
    int pir = env->spr_cb[SPR_PIR].default_value;

    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    /* TODO: TCG supports only one thread */

    book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
}
#endif /* TARGET_PPC64 */

void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    CPUPPCState *env = cs->env_ptr;
    uint32_t insn;

    /* Restore state and reload the insn we executed, for filling in DSISR. */
    cpu_restore_state(cs, retaddr, true);
    insn = cpu_ldl_code(env, env->nip);

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_4xx:
        env->spr[SPR_40x_DEAR] = vaddr;
        break;
    case POWERPC_MMU_BOOKE:
    case POWERPC_MMU_BOOKE206:
        env->spr[SPR_BOOKE_DEAR] = vaddr;
        break;
    default:
        env->spr[SPR_DAR] = vaddr;
        break;
    }

    cs->exception_index = POWERPC_EXCP_ALIGN;
    env->error_code = insn & 0x03FF0000;

    cpu_loop_exit(cs);
}
#endif /* CONFIG_TCG */
#endif /* !CONFIG_USER_ONLY */