/*
 * (scraped gitweb header, kept for provenance:
 *  git.proxmox.com / mirror_qemu.git — commit "PPC64/TCG: Implement 'rfebb'
 *  instruction" — target/ppc/excp_helper.c)
 */
1 /*
2 * PowerPC exception emulation helpers for QEMU.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/main-loop.h"
21 #include "cpu.h"
22 #include "exec/exec-all.h"
23 #include "internal.h"
24 #include "helper_regs.h"
25
26 #include "trace.h"
27
28 #ifdef CONFIG_TCG
29 #include "exec/helper-proto.h"
30 #include "exec/cpu_ldst.h"
31 #endif
32
33 /* #define DEBUG_SOFTWARE_TLB */
34
35 /*****************************************************************************/
36 /* Exception processing */
37 #if !defined(CONFIG_USER_ONLY)
38
/*
 * Log the GPRs relevant to an 'sc' system call (r0 and the argument
 * registers r3-r8) plus the current NIP.  Output only appears when
 * CPU_LOG_INT logging is enabled.
 */
static inline void dump_syscall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
                  " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
                  " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
                  ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
                  ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
                  ppc_dump_gpr(env, 8), env->nip);
}
50
/*
 * Log the GPRs relevant to a hypercall (r3-r12) plus the current NIP.
 * Output only appears when CPU_LOG_INT logging is enabled.
 */
static inline void dump_hcall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
                  " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
                  " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64
                  " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4),
                  ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6),
                  ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8),
                  ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10),
                  ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12),
                  env->nip);
}
65
66 static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
67 target_ulong *msr)
68 {
69 /* We no longer are in a PM state */
70 env->resume_as_sreset = false;
71
72 /* Pretend to be returning from doze always as we don't lose state */
73 *msr |= SRR1_WS_NOLOSS;
74
75 /* Machine checks are sent normally */
76 if (excp == POWERPC_EXCP_MCHECK) {
77 return excp;
78 }
79 switch (excp) {
80 case POWERPC_EXCP_RESET:
81 *msr |= SRR1_WAKERESET;
82 break;
83 case POWERPC_EXCP_EXTERNAL:
84 *msr |= SRR1_WAKEEE;
85 break;
86 case POWERPC_EXCP_DECR:
87 *msr |= SRR1_WAKEDEC;
88 break;
89 case POWERPC_EXCP_SDOOR:
90 *msr |= SRR1_WAKEDBELL;
91 break;
92 case POWERPC_EXCP_SDOOR_HV:
93 *msr |= SRR1_WAKEHDBELL;
94 break;
95 case POWERPC_EXCP_HV_MAINT:
96 *msr |= SRR1_WAKEHMI;
97 break;
98 case POWERPC_EXCP_HVIRT:
99 *msr |= SRR1_WAKEHVI;
100 break;
101 default:
102 cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
103 excp);
104 }
105 return POWERPC_EXCP_RESET;
106 }
107
108 /*
109 * AIL - Alternate Interrupt Location, a mode that allows interrupts to be
110 * taken with the MMU on, and which uses an alternate location (e.g., so the
111 * kernel/hv can map the vectors there with an effective address).
112 *
113 * An interrupt is considered to be taken "with AIL" or "AIL applies" if they
114 * are delivered in this way. AIL requires the LPCR to be set to enable this
115 * mode, and then a number of conditions have to be true for AIL to apply.
116 *
117 * First of all, SRESET, MCE, and HMI are always delivered without AIL, because
118 * they specifically want to be in real mode (e.g., the MCE might be signaling
119 * a SLB multi-hit which requires SLB flush before the MMU can be enabled).
120 *
121 * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV],
122 * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current
123 * radix mode (LPCR[HR]).
124 *
125 * POWER8, POWER9 with LPCR[HR]=0
126 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
127 * +-----------+-------------+---------+-------------+-----+
128 * | a | 00/01/10 | x | x | 0 |
129 * | a | 11 | 0 | 1 | 0 |
130 * | a | 11 | 1 | 1 | a |
131 * | a | 11 | 0 | 0 | a |
132 * +-------------------------------------------------------+
133 *
134 * POWER9 with LPCR[HR]=1
135 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
136 * +-----------+-------------+---------+-------------+-----+
137 * | a | 00/01/10 | x | x | 0 |
138 * | a | 11 | x | x | a |
139 * +-------------------------------------------------------+
140 *
141 * The difference with POWER9 being that MSR[HV] 0->1 interrupts can be sent to
142 * the hypervisor in AIL mode if the guest is radix. This is good for
143 * performance but allows the guest to influence the AIL of hypervisor
144 * interrupts using its MSR, and also the hypervisor must disallow guest
145 * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to
146 * use AIL for its MSR[HV] 0->1 interrupts.
147 *
148 * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to
149 * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and
150 * MSR[HV] 1->1).
151 *
152 * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1.
153 *
154 * POWER10 behaviour is
155 * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
156 * +-----------+------------+-------------+---------+-------------+-----+
157 * | a | h | 00/01/10 | 0 | 0 | 0 |
158 * | a | h | 11 | 0 | 0 | a |
159 * | a | h | x | 0 | 1 | h |
160 * | a | h | 00/01/10 | 1 | 1 | 0 |
161 * | a | h | 11 | 1 | 1 | h |
162 * +--------------------------------------------------------------------+
163 */
/*
 * Apply the AIL/HAIL rules described in the table above: when AIL applies
 * to this interrupt, turn on MSR[IR]/MSR[DR] in *new_msr and relocate
 * *vector to the alternate location.  No-op on non-TARGET_PPC64 builds.
 */
static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
                                      target_ulong msr,
                                      target_ulong *new_msr,
                                      target_ulong *vector)
{
#if defined(TARGET_PPC64)
    CPUPPCState *env = &cpu->env;
    bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1);
    bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB);
    int ail = 0;

    if (excp == POWERPC_EXCP_MCHECK ||
        excp == POWERPC_EXCP_RESET ||
        excp == POWERPC_EXCP_HV_MAINT) {
        /* SRESET, MCE, HMI never apply AIL */
        return;
    }

    if (excp_model == POWERPC_EXCP_POWER8 ||
        excp_model == POWERPC_EXCP_POWER9) {
        if (!mmu_all_on) {
            /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
            return;
        }
        if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) {
            /*
             * AIL does not work if there is a MSR[HV] 0->1 transition and the
             * partition is in HPT mode. For radix guests, such interrupts are
             * allowed to be delivered to the hypervisor in ail mode.
             */
            return;
        }

        ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        if (ail == 0) {
            return;
        }
        if (ail == 1) {
            /* AIL=1 is reserved, treat it like AIL=0 */
            return;
        }

    } else if (excp_model == POWERPC_EXCP_POWER10) {
        if (!mmu_all_on && !hv_escalation) {
            /*
             * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
             * Guest->guest and HV->HV interrupts do require MMU on.
             */
            return;
        }

        if (*new_msr & MSR_HVB) {
            if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) {
                /* HV interrupts depend on LPCR[HAIL] */
                return;
            }
            ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
        } else {
            ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        }
        if (ail == 0) {
            return;
        }
        if (ail == 1 || ail == 2) {
            /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
            return;
        }
    } else {
        /* Other processors do not support AIL */
        return;
    }

    /*
     * AIL applies, so the new MSR gets IR and DR set, and an offset applied
     * to the new IP.
     */
    *new_msr |= (1 << MSR_IR) | (1 << MSR_DR);

    if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
        /* AIL=2: 0x18000 offset; AIL=3: quadrant-3 EA with 0x4000 offset */
        if (ail == 2) {
            *vector |= 0x0000000000018000ull;
        } else if (ail == 3) {
            *vector |= 0xc000000000004000ull;
        }
    } else {
        /*
         * scv AIL is a little different. AIL=2 does not change the address,
         * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
         */
        if (ail == 3) {
            *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */
            *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */
        }
    }
#endif
}
260
/*
 * Commit the computed exception state to the CPU: install the new MSR
 * (filtered by msr_mask) and NIP, clear the pending-exception bookkeeping
 * and the load reservation, and run any delayed TLB flush.  This is the
 * final step of exception delivery.
 */
static inline void powerpc_set_excp_state(PowerPCCPU *cpu,
                                          target_ulong vector, target_ulong msr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    /*
     * We don't use hreg_store_msr here as already have treated any
     * special case that could occur. Just store MSR and update hflags
     *
     * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
     * will prevent setting of the HV bit which some exceptions might need
     * to do.
     */
    env->msr = msr & env->msr_mask;
    hreg_compute_hflags(env);
    env->nip = vector;
    /* Reset exception state */
    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;

    /* Reset the reservation */
    env->reserve_addr = -1;

    /*
     * Any interrupt is context synchronizing, check if TCG TLB needs
     * a delayed flush on ppc64
     */
    check_tlb_flush(env, false);
}
291
292 /*
293 * Note that this function should be greatly optimized when called
294 * with a constant excp, from ppc_hw_interrupt
295 */
/*
 * Deliver exception 'excp' to the CPU according to the rules of the
 * 'excp_model' exception model: compute the saved MSR (msr), the handler
 * MSR (new_msr) and the save/restore registers (srr0/srr1, plus optional
 * alternates asrr0/asrr1 on BookE), then the handler vector, apply AIL
 * relocation, and commit via powerpc_set_excp_state().
 */
static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    target_ulong msr, new_msr, vector;
    int srr0, srr1, asrr0, asrr1, lev = -1;

    qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
                  " => %08x (%02x)\n", env->nip, excp, env->error_code);

    /* new srr1 value excluding must-be-zero bits */
    if (excp_model == POWERPC_EXCP_BOOKE) {
        msr = env->msr;
    } else {
        msr = env->msr & ~0x783f0000ULL;
    }

    /*
     * new interrupt handler msr preserves existing HV and ME unless
     * explicitly overriden
     */
    new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);

    /* target registers */
    srr0 = SPR_SRR0;
    srr1 = SPR_SRR1;
    asrr0 = -1;
    asrr1 = -1;

    /*
     * check for special resume at 0x100 from doze/nap/sleep/winkle on
     * P7/P8/P9
     */
    if (env->resume_as_sreset) {
        excp = powerpc_reset_wakeup(cs, env, excp, &msr);
    }

    /*
     * Hypervisor emulation assistance interrupt only exists on server
     * arch 2.05 server or later. We also don't want to generate it if
     * we don't have HVB in msr_mask (PAPR mode).
     */
    if (excp == POWERPC_EXCP_HV_EMU
#if defined(TARGET_PPC64)
        && !(mmu_is_64bit(env->mmu_model) && (env->msr_mask & MSR_HVB))
#endif /* defined(TARGET_PPC64) */

        ) {
        excp = POWERPC_EXCP_PROGRAM;
    }

#ifdef TARGET_PPC64
    /*
     * SPEU and VPU share the same IVOR but they exist in different
     * processors. SPEU is e500v1/2 only and VPU is e6500 only.
     */
    if (excp_model == POWERPC_EXCP_BOOKE && excp == POWERPC_EXCP_VPU) {
        excp = POWERPC_EXCP_SPEU;
    }
#endif

    switch (excp) {
    case POWERPC_EXCP_NONE:
        /* Should never happen */
        return;
    case POWERPC_EXCP_CRITICAL:    /* Critical input                         */
        /* Critical interrupts use dedicated save/restore SPRs per model */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        case POWERPC_EXCP_G2:
            break;
        default:
            goto excp_invalid;
        }
        break;
    case POWERPC_EXCP_MCHECK:    /* Machine check exception                  */
        if (msr_me == 0) {
            /*
             * Machine check exception is not enabled. Enter
             * checkstop state.
             */
            fprintf(stderr, "Machine check while not allowed. "
                    "Entering checkstop state\n");
            if (qemu_log_separate()) {
                qemu_log("Machine check while not allowed. "
                         "Entering checkstop state\n");
            }
            cs->halted = 1;
            cpu_interrupt_exittb(cs);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR).
             */
            new_msr |= (target_ulong)MSR_HVB;
        }

        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);

        /* XXX: should also have something loaded in DAR / DSISR */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_MCSRR0;
            srr1 = SPR_BOOKE_MCSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DSI:       /* Data storage exception                   */
        trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:       /* Instruction storage exception            */
        trace_ppc_excp_isi(msr, env->nip);
        /* error_code carries the fault status bits to be saved in SRR1 */
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL:  /* External input                           */
    {
        bool lpes0;

        cs = CPU(cpu);

        /*
         * Exception targeting modifiers
         *
         * LPES0 is supported on POWER7/8/9
         * LPES1 is not supported (old iSeries mode)
         *
         * On anything else, we behave as if LPES0 is 1
         * (externals don't alter MSR:HV)
         */
#if defined(TARGET_PPC64)
        if (excp_model == POWERPC_EXCP_POWER7 ||
            excp_model == POWERPC_EXCP_POWER8 ||
            excp_model == POWERPC_EXCP_POWER9 ||
            excp_model == POWERPC_EXCP_POWER10) {
            lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        } else
#endif /* defined(TARGET_PPC64) */
        {
            lpes0 = true;
        }

        /* LPES0=0: external interrupts go to the hypervisor (HSRR0/1) */
        if (!lpes0) {
            new_msr |= (target_ulong)MSR_HVB;
            new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
            srr0 = SPR_HSRR0;
            srr1 = SPR_HSRR1;
        }
        if (env->mpic_proxy) {
            /* IACK the IRQ on delivery */
            env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
        }
        break;
    }
    case POWERPC_EXCP_ALIGN:     /* Alignment exception                      */
        /* Get rS/rD and rA from faulting opcode */
        /*
         * Note: the opcode fields will not be set properly for a
         * direct store load/store, but nobody cares as nobody
         * actually uses direct store segments.
         */
        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        break;
    case POWERPC_EXCP_PROGRAM:   /* Program exception                        */
        /* Low nibble of error_code is sub-type flags; mask it off */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            /* FP exceptions ignored when FP is disabled or non-trapping */
            if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
                trace_ppc_excp_fp_ignore();
                cs->exception_index = POWERPC_EXCP_NONE;
                env->error_code = 0;
                return;
            }

            /*
             * FP exceptions always have NIP pointing to the faulting
             * instruction, so always use store_next and claim we are
             * precise in the MSR.
             */
            msr |= 0x00100000;
            env->spr[SPR_BOOKE_ESR] = ESR_FP;
            break;
        case POWERPC_EXCP_INVAL:
            trace_ppc_excp_inval(env->nip);
            msr |= 0x00080000;
            env->spr[SPR_BOOKE_ESR] = ESR_PIL;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            env->spr[SPR_BOOKE_ESR] = ESR_PPR;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            env->spr[SPR_BOOKE_ESR] = ESR_PTR;
            break;
        default:
            /* Should never occur */
            cpu_abort(cs, "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
    case POWERPC_EXCP_SYSCALL:   /* System call exception                    */
        /* lev is the 'sc' LEV field: lev==1 selects a hypercall */
        lev = env->error_code;

        if ((lev == 1) && cpu->vhyp) {
            dump_hcall(env);
        } else {
            dump_syscall(env);
        }

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /* "PAPR mode" built-in hypercall emulation */
        if ((lev == 1) && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            return;
        }
        if (lev == 1) {
            new_msr |= (target_ulong)MSR_HVB;
        }
        break;
    case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception                     */
        lev = env->error_code;
        dump_syscall(env);
        env->nip += 4;
        /* scv keeps EE and RI from the caller's MSR in the handler MSR */
        new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_FPU:       /* Floating-point unavailable exception     */
    case POWERPC_EXCP_APU:       /* Auxiliary processor unavailable          */
    case POWERPC_EXCP_DECR:      /* Decrementer exception                    */
        break;
    case POWERPC_EXCP_FIT:       /* Fixed-interval timer interrupt           */
        /* FIT on 4xx */
        trace_ppc_excp_print("FIT");
        break;
    case POWERPC_EXCP_WDT:       /* Watchdog timer interrupt                 */
        trace_ppc_excp_print("WDT");
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DTLB:      /* Data TLB error                           */
    case POWERPC_EXCP_ITLB:      /* Instruction TLB error                    */
        break;
    case POWERPC_EXCP_DEBUG:     /* Debug interrupt                          */
        if (env->flags & POWERPC_FLAG_DE) {
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_DSRR0;
            srr1 = SPR_BOOKE_DSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            /* DBSR already modified by caller */
        } else {
            cpu_abort(cs, "Debug exception triggered on unsupported model\n");
        }
        break;
    case POWERPC_EXCP_SPEU:   /* SPE/embedded floating-point unavailable/VPU */
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPDI:     /* Embedded floating-point data interrupt   */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point data exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPRI:     /* Embedded floating-point round interrupt  */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point round exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EPERFM:    /* Embedded performance monitor interrupt   */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DOORI:     /* Embedded doorbell interrupt              */
        break;
    case POWERPC_EXCP_DOORCI:    /* Embedded doorbell critical interrupt     */
        srr0 = SPR_BOOKE_CSRR0;
        srr1 = SPR_BOOKE_CSRR1;
        break;
    case POWERPC_EXCP_RESET:     /* System reset exception                   */
        /* A power-saving exception sets ME, otherwise it is unchanged */
        if (msr_pow) {
            /* indicate that we resumed from power save mode */
            msr |= 0x10000;
            new_msr |= ((target_ulong)1 << MSR_ME);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
             */
            new_msr |= (target_ulong)MSR_HVB;
        } else {
            if (msr_pow) {
                cpu_abort(cs, "Trying to deliver power-saving system reset "
                          "exception %d with no HV support\n", excp);
            }
        }
        break;
    case POWERPC_EXCP_DSEG:      /* Data segment exception                   */
    case POWERPC_EXCP_ISEG:      /* Instruction segment exception            */
    case POWERPC_EXCP_TRACE:     /* Trace exception                          */
        break;
    case POWERPC_EXCP_HISI:      /* Hypervisor instruction storage exception */
        msr |= env->error_code;
        /* fall through */
    case POWERPC_EXCP_HDECR:     /* Hypervisor decrementer exception         */
    case POWERPC_EXCP_HDSI:      /* Hypervisor data storage exception        */
    case POWERPC_EXCP_HDSEG:     /* Hypervisor data segment exception        */
    case POWERPC_EXCP_HISEG:     /* Hypervisor instruction segment exception */
    case POWERPC_EXCP_SDOOR_HV:  /* Hypervisor Doorbell interrupt            */
    case POWERPC_EXCP_HV_EMU:
    case POWERPC_EXCP_HVIRT:     /* Hypervisor virtualization                */
        /* All HV-class interrupts use HSRR0/1 and set MSR[HV] */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_VPU:       /* Vector unavailable exception             */
    case POWERPC_EXCP_VSXU:      /* VSX unavailable exception                */
    case POWERPC_EXCP_FU:        /* Facility unavailable exception           */
#ifdef TARGET_PPC64
        /* error_code holds the interruption cause for the FSCR IC field */
        env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
#endif
        break;
    case POWERPC_EXCP_HV_FU:     /* Hypervisor Facility Unavailable Exception */
#ifdef TARGET_PPC64
        env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
#endif
        break;
    case POWERPC_EXCP_PIT:       /* Programmable interval timer interrupt    */
        trace_ppc_excp_print("PIT");
        break;
    case POWERPC_EXCP_IO:        /* IO error exception                       */
        /* XXX: TODO */
        cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_RUNM:      /* Run mode exception                       */
        /* XXX: TODO */
        cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_EMUL:      /* Emulation trap exception                 */
        /* XXX: TODO */
        cpu_abort(cs, "602 emulation trap exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IFTLB:     /* Instruction fetch TLB error              */
    case POWERPC_EXCP_DLTLB:     /* Data load TLB miss                       */
    case POWERPC_EXCP_DSTLB:     /* Data store TLB miss                      */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_G2:
            /* Swap temporary saved registers with GPRs */
            if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
                new_msr |= (target_ulong)1 << MSR_TGPR;
                hreg_swap_gpr_tgpr(env);
            }
            /* fall through */
        case POWERPC_EXCP_7x5:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_IMISS];
                    cmp = &env->spr[SPR_ICMP];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_DMISS];
                    cmp = &env->spr[SPR_DCMP];
                }
                qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->spr[SPR_HASH1], env->spr[SPR_HASH2],
                         env->error_code);
            }
#endif
            /* Software TLB miss handlers read CR0 and status from SRR1 */
            msr |= env->crf[0] << 28;
            msr |= env->error_code; /* key, D/I, S/L bits */
            /* Set way using a LRU mechanism */
            msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
            break;
        default:
            cpu_abort(cs, "Invalid TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_FPA:       /* Floating-point assist exception          */
        /* XXX: TODO */
        cpu_abort(cs, "Floating point assist exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DABR:      /* Data address breakpoint                  */
        /* XXX: TODO */
        cpu_abort(cs, "DABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IABR:      /* Instruction address breakpoint           */
        /* XXX: TODO */
        cpu_abort(cs, "IABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SMI:       /* System management interrupt              */
        /* XXX: TODO */
        cpu_abort(cs, "SMI exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_THERM:     /* Thermal interrupt                        */
        /* XXX: TODO */
        cpu_abort(cs, "Thermal management exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_PERFM:     /* Embedded performance monitor interrupt   */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_VPUA:      /* Vector assist exception                  */
        /* XXX: TODO */
        cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SOFTP:     /* Soft patch exception                     */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 soft-patch exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MAINT:     /* Maintenance exception                    */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 maintenance exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint             */
        /* XXX: TODO */
        cpu_abort(cs, "Maskable external exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_NMEXTBR:   /* Non maskable external breakpoint         */
        /* XXX: TODO */
        cpu_abort(cs, "Non maskable external exception "
                  "is not implemented yet !\n");
        break;
    default:
    excp_invalid:
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
        break;
    }

    /* Sanity check */
    if (!(env->msr_mask & MSR_HVB)) {
        if (new_msr & MSR_HVB) {
            cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
                      "no HV support\n", excp);
        }
        if (srr0 == SPR_HSRR0) {
            cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
                      "no HV support\n", excp);
        }
    }

    /*
     * Sort out endianness of interrupt, this differs depending on the
     * CPU, the HV mode, etc...
     */
#ifdef TARGET_PPC64
    if (excp_model == POWERPC_EXCP_POWER7) {
        if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER8) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER9 ||
               excp_model == POWERPC_EXCP_POWER10) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#else
    if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#endif

    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(cs, "Raised an exception without defined vector %d\n",
                  excp);
    }

    vector |= env->excp_prefix;

    /* If any alternate SRR register are defined, duplicate saved values */
    if (asrr0 != -1) {
        env->spr[asrr0] = env->nip;
    }
    if (asrr1 != -1) {
        env->spr[asrr1] = msr;
    }

#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_BOOKE) {
        if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
            /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
            new_msr |= (target_ulong)1 << MSR_CM;
        } else {
            vector = (uint32_t)vector;
        }
    } else {
        if (!msr_isf && !mmu_is_64bit(env->mmu_model)) {
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_SF;
        }
    }
#endif

    if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
        /* Save PC */
        env->spr[srr0] = env->nip;

        /* Save MSR */
        env->spr[srr1] = msr;

#if defined(TARGET_PPC64)
    } else {
        /* scv: vector is offset by LEV, and return state goes in LR/CTR */
        vector += lev * 0x20;

        env->lr = env->nip;
        env->ctr = msr;
#endif
    }

    /* This can update new_msr and vector if AIL applies */
    ppc_excp_apply_ail(cpu, excp_model, excp, msr, &new_msr, &vector);

    powerpc_set_excp_state(cpu, vector, new_msr);
}
887
888 void ppc_cpu_do_interrupt(CPUState *cs)
889 {
890 PowerPCCPU *cpu = POWERPC_CPU(cs);
891 CPUPPCState *env = &cpu->env;
892
893 powerpc_excp(cpu, env->excp_model, cs->exception_index);
894 }
895
/*
 * Scan env->pending_interrupts in priority order and deliver at most one
 * interrupt via powerpc_excp().  Order matters: reset and machine check
 * come first, then HV-class interrupts, then the MSR[EE]-gated sources.
 * Most sources clear their pending bit on delivery.
 */
static void ppc_hw_interrupt(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    bool async_deliver;

    /* External reset */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
        return;
    }
    /* Machine check exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
        return;
    }
#if 0 /* TODO */
    /* External debug exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
        return;
    }
#endif

    /*
     * For interrupts that gate on MSR:EE, we need to do something a
     * bit more subtle, as we need to let them through even when EE is
     * clear when coming out of some power management states (in order
     * for them to become a 0x100).
     */
    async_deliver = (msr_ee != 0) || env->resume_as_sreset;

    /* Hypervisor decrementer exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
        /* LPCR will be clear when not supported so this will work */
        bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
        if ((async_deliver || msr_hv == 0) && hdice) {
            /* HDEC clears on delivery */
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
            return;
        }
    }

    /* Hypervisor virtualization interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
        /* LPCR will be clear when not supported so this will work */
        bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
        if ((async_deliver || msr_hv == 0) && hvice) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT);
            return;
        }
    }

    /* External interrupt can ignore MSR:EE under some circumstances */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
        bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
        /* HEIC blocks delivery to the hypervisor */
        if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
            (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
            return;
        }
    }
    if (msr_ce != 0) {
        /* External critical interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
            return;
        }
    }
    if (async_deliver != 0) {
        /* Watchdog timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
            return;
        }
        /* Fixed interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
            return;
        }
        /* Programmable interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
            return;
        }
        /* Decrementer exception */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
            /* Some models keep DEC pending until the level source drops */
            if (ppc_decr_clear_on_delivery(env)) {
                env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
            }
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
            /* book3s arch2.x uses the server doorbell, embedded the other */
            if (is_book3s_arch2x(env)) {
                powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR);
            } else {
                powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
            }
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
            return;
        }
        /* Thermal interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
            return;
        }
    }

    if (env->resume_as_sreset) {
        /*
         * This is a bug ! It means that has_work took us out of halt without
         * anything to deliver while in a PM state that requires getting
         * out via a 0x100
         *
         * This means we will incorrectly execute past the power management
         * instruction instead of triggering a reset.
         *
         * It generally means a discrepancy between the wakeup conditions in the
         * processor has_work implementation and the logic in this function.
         */
        cpu_abort(env_cpu(env),
                  "Wakeup from PM state but interrupt Undelivered");
    }
}
1045
1046 void ppc_cpu_do_system_reset(CPUState *cs)
1047 {
1048 PowerPCCPU *cpu = POWERPC_CPU(cs);
1049 CPUPPCState *env = &cpu->env;
1050
1051 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
1052 }
1053
1054 void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
1055 {
1056 PowerPCCPU *cpu = POWERPC_CPU(cs);
1057 CPUPPCState *env = &cpu->env;
1058 target_ulong msr = 0;
1059
1060 /*
1061 * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already
1062 * been set by KVM.
1063 */
1064 msr = (1ULL << MSR_ME);
1065 msr |= env->msr & (1ULL << MSR_SF);
1066 if (ppc_interrupts_little_endian(cpu)) {
1067 msr |= (1ULL << MSR_LE);
1068 }
1069
1070 powerpc_set_excp_state(cpu, vector, msr);
1071 }
1072
1073 bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1074 {
1075 PowerPCCPU *cpu = POWERPC_CPU(cs);
1076 CPUPPCState *env = &cpu->env;
1077
1078 if (interrupt_request & CPU_INTERRUPT_HARD) {
1079 ppc_hw_interrupt(env);
1080 if (env->pending_interrupts == 0) {
1081 cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
1082 }
1083 return true;
1084 }
1085 return false;
1086 }
1087
1088 #endif /* !CONFIG_USER_ONLY */
1089
1090 /*****************************************************************************/
1091 /* Exceptions processing helpers */
1092
/*
 * Record @exception/@error_code on the CPU and exit the current TB,
 * restoring guest state from host return address @raddr (0 = no unwind).
 * Does not return.
 */
void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}
1102
/* As raise_exception_err_ra() but without unwinding (raddr = 0). */
void raise_exception_err(CPUPPCState *env, uint32_t exception,
                         uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}
1108
/* Raise @exception with no error code and no unwinding. */
void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}
1113
/* Raise @exception with no error code, unwinding from @raddr. */
void raise_exception_ra(CPUPPCState *env, uint32_t exception,
                        uintptr_t raddr)
{
    raise_exception_err_ra(env, exception, 0, raddr);
}
1119
1120 #ifdef CONFIG_TCG
/* TCG helper entry point for raise_exception_err(). */
void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}
1126
/* TCG helper entry point for raise_exception(). */
void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}
1131 #endif
1132
1133 #if !defined(CONFIG_USER_ONLY)
1134 #ifdef CONFIG_TCG
1135 void helper_store_msr(CPUPPCState *env, target_ulong val)
1136 {
1137 uint32_t excp = hreg_store_msr(env, val, 0);
1138
1139 if (excp != 0) {
1140 CPUState *cs = env_cpu(env);
1141 cpu_interrupt_exittb(cs);
1142 raise_exception(env, excp);
1143 }
1144 }
1145
1146 #if defined(TARGET_PPC64)
1147 void helper_scv(CPUPPCState *env, uint32_t lev)
1148 {
1149 if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
1150 raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
1151 } else {
1152 raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
1153 }
1154 }
1155
1156 void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
1157 {
1158 CPUState *cs;
1159
1160 cs = env_cpu(env);
1161 cs->halted = 1;
1162
1163 /* Condition for waking up at 0x100 */
1164 env->resume_as_sreset = (insn != PPC_PM_STOP) ||
1165 (env->spr[SPR_PSSCR] & PSSCR_EC);
1166 }
1167 #endif /* defined(TARGET_PPC64) */
1168 #endif /* CONFIG_TCG */
1169
/*
 * Common return-from-interrupt tail used by all rfi variants: install
 * the saved @nip/@msr pair and resynchronize TCG state.
 */
static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    CPUState *cs = env_cpu(env);

    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

#if defined(TARGET_PPC64)
    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
    trace_ppc_excp_rfi(env->nip, env->msr);
    /*
     * No need to raise an exception here, as rfi is always the last
     * insn of a TB
     */
    cpu_interrupt_exittb(cs);
    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}
1200
1201 #ifdef CONFIG_TCG
/* rfi: return from interrupt using SRR0/SRR1 (MSR restricted to 32 bits). */
void helper_rfi(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}
1206
1207 #define MSR_BOOK3S_MASK
1208 #if defined(TARGET_PPC64)
/* rfid: 64-bit return from interrupt using SRR0/SRR1. */
void helper_rfid(CPUPPCState *env)
{
    /*
     * The architecture defines a number of rules for which bits can
     * change but in practice, we handle this in hreg_store_msr()
     * which will be called by do_rfi(), so there is no need to filter
     * here
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}
1219
/* rfscv: return from scv; LR holds the return address, CTR the MSR. */
void helper_rfscv(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr);
}
1224
/* hrfid: hypervisor return from interrupt using HSRR0/HSRR1. */
void helper_hrfid(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
1229 #endif
1230
1231 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
1232 void helper_rfebb(CPUPPCState *env, target_ulong s)
1233 {
1234 target_ulong msr = env->msr;
1235
1236 /*
1237 * Handling of BESCR bits 32:33 according to PowerISA v3.1:
1238 *
1239 * "If BESCR 32:33 != 0b00 the instruction is treated as if
1240 * the instruction form were invalid."
1241 */
1242 if (env->spr[SPR_BESCR] & BESCR_INVALID) {
1243 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1244 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1245 }
1246
1247 env->nip = env->spr[SPR_EBBRR];
1248
1249 /* Switching to 32-bit ? Crop the nip */
1250 if (!msr_is_64bit(env, msr)) {
1251 env->nip = (uint32_t)env->spr[SPR_EBBRR];
1252 }
1253
1254 if (s) {
1255 env->spr[SPR_BESCR] |= BESCR_GE;
1256 } else {
1257 env->spr[SPR_BESCR] &= ~BESCR_GE;
1258 }
1259 }
1260 #endif
1261
1262 /*****************************************************************************/
1263 /* Embedded PowerPC specific helpers */
/* 40x rfci: return from critical interrupt using SRR2/SRR3. */
void helper_40x_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}
1268
/* BookE rfci: return from critical interrupt using CSRR0/CSRR1. */
void helper_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}
1273
/* BookE rfdi: return from debug interrupt using DSRR0/DSRR1. */
void helper_rfdi(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}
1279
/* BookE rfmci: return from machine-check interrupt using MCSRR0/MCSRR1. */
void helper_rfmci(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}
1285 #endif /* CONFIG_TCG */
1286 #endif /* !defined(CONFIG_USER_ONLY) */
1287
1288 #ifdef CONFIG_TCG
1289 void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
1290 uint32_t flags)
1291 {
1292 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1293 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1294 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1295 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1296 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1297 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
1298 POWERPC_EXCP_TRAP, GETPC());
1299 }
1300 }
1301
1302 #if defined(TARGET_PPC64)
1303 void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
1304 uint32_t flags)
1305 {
1306 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1307 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1308 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1309 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1310 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
1311 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
1312 POWERPC_EXCP_TRAP, GETPC());
1313 }
1314 }
1315 #endif
1316 #endif
1317
1318 #if !defined(CONFIG_USER_ONLY)
1319 /*****************************************************************************/
1320 /* PowerPC 601 specific instructions (POWER bridge) */
1321
1322 #ifdef CONFIG_TCG
/* POWER (601) rfsvc: return via LR, restoring the low 16 MSR bits from CTR. */
void helper_rfsvc(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
}
1327
1328 /* Embedded.Processor Control */
1329 static int dbell2irq(target_ulong rb)
1330 {
1331 int msg = rb & DBELL_TYPE_MASK;
1332 int irq = -1;
1333
1334 switch (msg) {
1335 case DBELL_TYPE_DBELL:
1336 irq = PPC_INTERRUPT_DOORBELL;
1337 break;
1338 case DBELL_TYPE_DBELL_CRIT:
1339 irq = PPC_INTERRUPT_CDOORBELL;
1340 break;
1341 case DBELL_TYPE_G_DBELL:
1342 case DBELL_TYPE_G_DBELL_CRIT:
1343 case DBELL_TYPE_G_DBELL_MC:
1344 /* XXX implement */
1345 default:
1346 break;
1347 }
1348
1349 return irq;
1350 }
1351
1352 void helper_msgclr(CPUPPCState *env, target_ulong rb)
1353 {
1354 int irq = dbell2irq(rb);
1355
1356 if (irq < 0) {
1357 return;
1358 }
1359
1360 env->pending_interrupts &= ~(1 << irq);
1361 }
1362
1363 void helper_msgsnd(target_ulong rb)
1364 {
1365 int irq = dbell2irq(rb);
1366 int pir = rb & DBELL_PIRTAG_MASK;
1367 CPUState *cs;
1368
1369 if (irq < 0) {
1370 return;
1371 }
1372
1373 qemu_mutex_lock_iothread();
1374 CPU_FOREACH(cs) {
1375 PowerPCCPU *cpu = POWERPC_CPU(cs);
1376 CPUPPCState *cenv = &cpu->env;
1377
1378 if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
1379 cenv->pending_interrupts |= 1 << irq;
1380 cpu_interrupt(cs, CPU_INTERRUPT_HARD);
1381 }
1382 }
1383 qemu_mutex_unlock_iothread();
1384 }
1385
1386 /* Server Processor Control */
1387
/* True iff rb encodes a server (type 5) doorbell message. */
static bool dbell_type_server(target_ulong rb)
{
    /*
     * A Directed Hypervisor Doorbell message is sent only if the
     * message type is 5. All other types are reserved and the
     * instruction is a no-op
     */
    return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
}
1397
1398 void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
1399 {
1400 if (!dbell_type_server(rb)) {
1401 return;
1402 }
1403
1404 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
1405 }
1406
1407 static void book3s_msgsnd_common(int pir, int irq)
1408 {
1409 CPUState *cs;
1410
1411 qemu_mutex_lock_iothread();
1412 CPU_FOREACH(cs) {
1413 PowerPCCPU *cpu = POWERPC_CPU(cs);
1414 CPUPPCState *cenv = &cpu->env;
1415
1416 /* TODO: broadcast message to all threads of the same processor */
1417 if (cenv->spr_cb[SPR_PIR].default_value == pir) {
1418 cenv->pending_interrupts |= 1 << irq;
1419 cpu_interrupt(cs, CPU_INTERRUPT_HARD);
1420 }
1421 }
1422 qemu_mutex_unlock_iothread();
1423 }
1424
1425 void helper_book3s_msgsnd(target_ulong rb)
1426 {
1427 int pir = rb & DBELL_PROCIDTAG_MASK;
1428
1429 if (!dbell_type_server(rb)) {
1430 return;
1431 }
1432
1433 book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
1434 }
1435
1436 #if defined(TARGET_PPC64)
1437 void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
1438 {
1439 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
1440
1441 if (!dbell_type_server(rb)) {
1442 return;
1443 }
1444
1445 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
1446 }
1447
1448 /*
1449 * sends a message to other threads that are on the same
1450 * multi-threaded processor
1451 */
/*
 * sends a message to other threads that are on the same
 * multi-threaded processor
 */
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
    int pir = env->spr_cb[SPR_PIR].default_value;

    /* msgsndp is gated by HFSCR[MSGP]. */
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);

    /* Only type-5 (server doorbell) messages are acted upon. */
    if (!dbell_type_server(rb)) {
        return;
    }

    /* TODO: TCG supports only one thread */

    book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
}
1466 #endif /* TARGET_PPC64 */
1467
/*
 * Alignment fault handler: record the faulting address in the SPR
 * appropriate for the MMU model and raise an alignment interrupt.
 * Does not return.
 */
void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    CPUPPCState *env = cs->env_ptr;
    uint32_t insn;

    /* Restore state and reload the insn we executed, for filling in DSISR. */
    cpu_restore_state(cs, retaddr, true);
    insn = cpu_ldl_code(env, env->nip);

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_4xx:
        env->spr[SPR_40x_DEAR] = vaddr;
        break;
    case POWERPC_MMU_BOOKE:
    case POWERPC_MMU_BOOKE206:
        env->spr[SPR_BOOKE_DEAR] = vaddr;
        break;
    default:
        env->spr[SPR_DAR] = vaddr;
        break;
    }

    cs->exception_index = POWERPC_EXCP_ALIGN;
    /* Keep the instruction's bits 6:15 for the handler's error code. */
    env->error_code = insn & 0x03FF0000;
    cpu_loop_exit(cs);
}
1496 #endif /* CONFIG_TCG */
1497 #endif /* !CONFIG_USER_ONLY */