target/ppc/excp_helper.c (mirror_qemu.git)
1 /*
2 * PowerPC exception emulation helpers for QEMU.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/main-loop.h"
21 #include "cpu.h"
22 #include "exec/helper-proto.h"
23 #include "exec/exec-all.h"
24 #include "exec/cpu_ldst.h"
25 #include "internal.h"
26 #include "helper_regs.h"
27
28 /* #define DEBUG_OP */
29 /* #define DEBUG_SOFTWARE_TLB */
30 /* #define DEBUG_EXCEPTIONS */
31
32 #ifdef DEBUG_EXCEPTIONS
33 # define LOG_EXCP(...) qemu_log(__VA_ARGS__)
34 #else
35 # define LOG_EXCP(...) do { } while (0)
36 #endif
37
38 /*****************************************************************************/
39 /* Exception processing */
40 #if defined(CONFIG_USER_ONLY)
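/*
 * User-mode emulation cannot take hardware interrupts, so these entry
 * points simply discard any pending exception state.
 */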
41 void ppc_cpu_do_interrupt(CPUState *cs)
42 {
43 PowerPCCPU *cpu = POWERPC_CPU(cs);
44 CPUPPCState *env = &cpu->env;
45
46 cs->exception_index = POWERPC_EXCP_NONE;
47 env->error_code = 0;
48 }
49
50 static void ppc_hw_interrupt(CPUPPCState *env)
51 {
52 CPUState *cs = env_cpu(env);
53
54 cs->exception_index = POWERPC_EXCP_NONE;
55 env->error_code = 0;
56 }
57 #else /* defined(CONFIG_USER_ONLY) */
58 static inline void dump_syscall(CPUPPCState *env)
59 {
60 qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 " r3=%016" PRIx64
61 " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
62 " nip=" TARGET_FMT_lx "\n",
63 ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
64 ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
65 ppc_dump_gpr(env, 6), env->nip);
66 }
67
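/*
 * When a POWER7/8/9 core wakes up from a power-saving state, the pending
 * event is delivered as a system reset and the wake cause is encoded into
 * the SRR1 value passed back through *msr.  The shifts below use IBM
 * (big-endian) bit numbering, hence the "63 - n" form.
 */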
68 static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
69 target_ulong *msr)
70 {
71     /* We are no longer in a PM state */
72 env->resume_as_sreset = false;
73
74     /* Always pretend we are returning from doze, as we don't lose state */
75 *msr |= (0x1ull << (63 - 47));
76
77 /* Machine checks are sent normally */
78 if (excp == POWERPC_EXCP_MCHECK) {
79 return excp;
80 }
81 switch (excp) {
82 case POWERPC_EXCP_RESET:
83 *msr |= 0x4ull << (63 - 45);
84 break;
85 case POWERPC_EXCP_EXTERNAL:
86 *msr |= 0x8ull << (63 - 45);
87 break;
88 case POWERPC_EXCP_DECR:
89 *msr |= 0x6ull << (63 - 45);
90 break;
91 case POWERPC_EXCP_SDOOR:
92 *msr |= 0x5ull << (63 - 45);
93 break;
94 case POWERPC_EXCP_SDOOR_HV:
95 *msr |= 0x3ull << (63 - 45);
96 break;
97 case POWERPC_EXCP_HV_MAINT:
98 *msr |= 0xaull << (63 - 45);
99 break;
100 case POWERPC_EXCP_HVIRT:
101 *msr |= 0x9ull << (63 - 45);
102 break;
103 default:
104 cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
105 excp);
106 }
107 return POWERPC_EXCP_RESET;
108 }
109
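/*
 * LPCR:AIL selects an Alternate Interrupt Location; return the offset
 * that gets OR-ed into the vector address for the configured AIL mode.
 */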
110 static uint64_t ppc_excp_vector_offset(CPUState *cs, int ail)
111 {
112 uint64_t offset = 0;
113
114 switch (ail) {
115 case AIL_NONE:
116 break;
117 case AIL_0001_8000:
118 offset = 0x18000;
119 break;
120 case AIL_C000_0000_0000_4000:
121 offset = 0xc000000000004000ull;
122 break;
123 default:
124 cpu_abort(cs, "Invalid AIL combination %d\n", ail);
125 break;
126 }
127
128 return offset;
129 }
130
131 static inline void powerpc_set_excp_state(PowerPCCPU *cpu,
132 target_ulong vector, target_ulong msr)
133 {
134 CPUState *cs = CPU(cpu);
135 CPUPPCState *env = &cpu->env;
136
137 /*
138      * We don't use hreg_store_msr() here as we have already treated any
139      * special case that could occur. Just store MSR and update hflags.
140 *
141 * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
142 * will prevent setting of the HV bit which some exceptions might need
143 * to do.
144 */
145 env->msr = msr & env->msr_mask;
146 hreg_compute_hflags(env);
147 env->nip = vector;
148 /* Reset exception state */
149 cs->exception_index = POWERPC_EXCP_NONE;
150 env->error_code = 0;
151
152 /* Reset the reservation */
153 env->reserve_addr = -1;
154
155 /*
156      * Any interrupt is context synchronizing; check if the TCG TLB needs
157      * a delayed flush on ppc64
158 */
159 check_tlb_flush(env, false);
160 }
161
162 /*
163 * Note that this function should be greatly optimized when called
164 * with a constant excp, from ppc_hw_interrupt
165 */
166 static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
167 {
168 CPUState *cs = CPU(cpu);
169 CPUPPCState *env = &cpu->env;
170 target_ulong msr, new_msr, vector;
171 int srr0, srr1, asrr0, asrr1, lev, ail;
172 bool lpes0;
173
174 qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
175 " => %08x (%02x)\n", env->nip, excp, env->error_code);
176
177 /* new srr1 value excluding must-be-zero bits */
178 if (excp_model == POWERPC_EXCP_BOOKE) {
179 msr = env->msr;
180 } else {
181 msr = env->msr & ~0x783f0000ULL;
182 }
183
184 /*
185 * new interrupt handler msr preserves existing HV and ME unless
186      * explicitly overridden
187 */
188 new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);
189
190 /* target registers */
191 srr0 = SPR_SRR0;
192 srr1 = SPR_SRR1;
193 asrr0 = -1;
194 asrr1 = -1;
195
196 /*
197 * check for special resume at 0x100 from doze/nap/sleep/winkle on
198 * P7/P8/P9
199 */
200 if (env->resume_as_sreset) {
201 excp = powerpc_reset_wakeup(cs, env, excp, &msr);
202 }
203
204 /*
205      * Exception targeting modifiers
206 *
207 * LPES0 is supported on POWER7/8/9
208 * LPES1 is not supported (old iSeries mode)
209 *
210 * On anything else, we behave as if LPES0 is 1
211 * (externals don't alter MSR:HV)
212 *
213 * AIL is initialized here but can be cleared by
214 * selected exceptions
215 */
216 #if defined(TARGET_PPC64)
217 if (excp_model == POWERPC_EXCP_POWER7 ||
218 excp_model == POWERPC_EXCP_POWER8 ||
219 excp_model == POWERPC_EXCP_POWER9) {
220 lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
221 if (excp_model != POWERPC_EXCP_POWER7) {
222 ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
223 } else {
224 ail = 0;
225 }
226 } else
227 #endif /* defined(TARGET_PPC64) */
228 {
229 lpes0 = true;
230 ail = 0;
231 }
232
233 /*
234      * The Hypervisor emulation assistance interrupt only exists on server
235      * arch 2.05 or later. We also don't want to generate it if
236 * we don't have HVB in msr_mask (PAPR mode).
237 */
238 if (excp == POWERPC_EXCP_HV_EMU
239 #if defined(TARGET_PPC64)
240 && !((env->mmu_model & POWERPC_MMU_64) && (env->msr_mask & MSR_HVB))
241 #endif /* defined(TARGET_PPC64) */
242
243 ) {
244 excp = POWERPC_EXCP_PROGRAM;
245 }
246
247 switch (excp) {
248 case POWERPC_EXCP_NONE:
249 /* Should never happen */
250 return;
251 case POWERPC_EXCP_CRITICAL: /* Critical input */
252 switch (excp_model) {
253 case POWERPC_EXCP_40x:
254 srr0 = SPR_40x_SRR2;
255 srr1 = SPR_40x_SRR3;
256 break;
257 case POWERPC_EXCP_BOOKE:
258 srr0 = SPR_BOOKE_CSRR0;
259 srr1 = SPR_BOOKE_CSRR1;
260 break;
261 case POWERPC_EXCP_G2:
262 break;
263 default:
264 goto excp_invalid;
265 }
266 break;
267 case POWERPC_EXCP_MCHECK: /* Machine check exception */
268 if (msr_me == 0) {
269 /*
270 * Machine check exception is not enabled. Enter
271 * checkstop state.
272 */
273 fprintf(stderr, "Machine check while not allowed. "
274 "Entering checkstop state\n");
275 if (qemu_log_separate()) {
276 qemu_log("Machine check while not allowed. "
277 "Entering checkstop state\n");
278 }
279 cs->halted = 1;
280 cpu_interrupt_exittb(cs);
281 }
282 if (env->msr_mask & MSR_HVB) {
283 /*
284 * ISA specifies HV, but can be delivered to guest with HV
285 * clear (e.g., see FWNMI in PAPR).
286 */
287 new_msr |= (target_ulong)MSR_HVB;
288 }
289 ail = 0;
290
291 /* machine check exceptions don't have ME set */
292 new_msr &= ~((target_ulong)1 << MSR_ME);
293
294 /* XXX: should also have something loaded in DAR / DSISR */
295 switch (excp_model) {
296 case POWERPC_EXCP_40x:
297 srr0 = SPR_40x_SRR2;
298 srr1 = SPR_40x_SRR3;
299 break;
300 case POWERPC_EXCP_BOOKE:
301 /* FIXME: choose one or the other based on CPU type */
302 srr0 = SPR_BOOKE_MCSRR0;
303 srr1 = SPR_BOOKE_MCSRR1;
304 asrr0 = SPR_BOOKE_CSRR0;
305 asrr1 = SPR_BOOKE_CSRR1;
306 break;
307 default:
308 break;
309 }
310 break;
311 case POWERPC_EXCP_DSI: /* Data storage exception */
312 LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx
313 "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
314 break;
315 case POWERPC_EXCP_ISI: /* Instruction storage exception */
316 LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
317 "\n", msr, env->nip);
318 msr |= env->error_code;
319 break;
320 case POWERPC_EXCP_EXTERNAL: /* External input */
321 cs = CPU(cpu);
322
323 if (!lpes0) {
324 new_msr |= (target_ulong)MSR_HVB;
325 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
326 srr0 = SPR_HSRR0;
327 srr1 = SPR_HSRR1;
328 }
329 if (env->mpic_proxy) {
330 /* IACK the IRQ on delivery */
331 env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
332 }
333 break;
334 case POWERPC_EXCP_ALIGN: /* Alignment exception */
335 /* Get rS/rD and rA from faulting opcode */
336 /*
337 * Note: the opcode fields will not be set properly for a
338 * direct store load/store, but nobody cares as nobody
339 * actually uses direct store segments.
340 */
341 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
342 break;
343 case POWERPC_EXCP_PROGRAM: /* Program exception */
344 switch (env->error_code & ~0xF) {
345 case POWERPC_EXCP_FP:
346 if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
347 LOG_EXCP("Ignore floating point exception\n");
348 cs->exception_index = POWERPC_EXCP_NONE;
349 env->error_code = 0;
350 return;
351 }
352
353 /*
354 * FP exceptions always have NIP pointing to the faulting
355 * instruction, so always use store_next and claim we are
356 * precise in the MSR.
357 */
358 msr |= 0x00100000;
359 env->spr[SPR_BOOKE_ESR] = ESR_FP;
360 break;
361 case POWERPC_EXCP_INVAL:
362 LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
363 msr |= 0x00080000;
364 env->spr[SPR_BOOKE_ESR] = ESR_PIL;
365 break;
366 case POWERPC_EXCP_PRIV:
367 msr |= 0x00040000;
368 env->spr[SPR_BOOKE_ESR] = ESR_PPR;
369 break;
370 case POWERPC_EXCP_TRAP:
371 msr |= 0x00020000;
372 env->spr[SPR_BOOKE_ESR] = ESR_PTR;
373 break;
374 default:
375 /* Should never occur */
376 cpu_abort(cs, "Invalid program exception %d. Aborting\n",
377 env->error_code);
378 break;
379 }
380 break;
381 case POWERPC_EXCP_SYSCALL: /* System call exception */
382 dump_syscall(env);
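        /*
         * error_code carries the LEV field of the sc instruction;
         * "sc 1" (lev == 1) targets the hypervisor.
         */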
383 lev = env->error_code;
384
385 /*
386 * We need to correct the NIP which in this case is supposed
387 * to point to the next instruction
388 */
389 env->nip += 4;
390
391 /* "PAPR mode" built-in hypercall emulation */
392 if ((lev == 1) && cpu->vhyp) {
393 PPCVirtualHypervisorClass *vhc =
394 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
395 vhc->hypercall(cpu->vhyp, cpu);
396 return;
397 }
398 if (lev == 1) {
399 new_msr |= (target_ulong)MSR_HVB;
400 }
401 break;
402 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
403 case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
404 case POWERPC_EXCP_DECR: /* Decrementer exception */
405 break;
406 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
407 /* FIT on 4xx */
408 LOG_EXCP("FIT exception\n");
409 break;
410 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
411 LOG_EXCP("WDT exception\n");
412 switch (excp_model) {
413 case POWERPC_EXCP_BOOKE:
414 srr0 = SPR_BOOKE_CSRR0;
415 srr1 = SPR_BOOKE_CSRR1;
416 break;
417 default:
418 break;
419 }
420 break;
421 case POWERPC_EXCP_DTLB: /* Data TLB error */
422 case POWERPC_EXCP_ITLB: /* Instruction TLB error */
423 break;
424 case POWERPC_EXCP_DEBUG: /* Debug interrupt */
425 if (env->flags & POWERPC_FLAG_DE) {
426 /* FIXME: choose one or the other based on CPU type */
427 srr0 = SPR_BOOKE_DSRR0;
428 srr1 = SPR_BOOKE_DSRR1;
429 asrr0 = SPR_BOOKE_CSRR0;
430 asrr1 = SPR_BOOKE_CSRR1;
431 /* DBSR already modified by caller */
432 } else {
433 cpu_abort(cs, "Debug exception triggered on unsupported model\n");
434 }
435 break;
436 case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */
437 env->spr[SPR_BOOKE_ESR] = ESR_SPV;
438 break;
439 case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */
440 /* XXX: TODO */
441 cpu_abort(cs, "Embedded floating point data exception "
442 "is not implemented yet !\n");
443 env->spr[SPR_BOOKE_ESR] = ESR_SPV;
444 break;
445 case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */
446 /* XXX: TODO */
447 cpu_abort(cs, "Embedded floating point round exception "
448 "is not implemented yet !\n");
449 env->spr[SPR_BOOKE_ESR] = ESR_SPV;
450 break;
451 case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */
452 /* XXX: TODO */
453 cpu_abort(cs,
454 "Performance counter exception is not implemented yet !\n");
455 break;
456 case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
457 break;
458 case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
459 srr0 = SPR_BOOKE_CSRR0;
460 srr1 = SPR_BOOKE_CSRR1;
461 break;
462 case POWERPC_EXCP_RESET: /* System reset exception */
463 /* A power-saving exception sets ME, otherwise it is unchanged */
464 if (msr_pow) {
465 /* indicate that we resumed from power save mode */
466 msr |= 0x10000;
467 new_msr |= ((target_ulong)1 << MSR_ME);
468 }
469 if (env->msr_mask & MSR_HVB) {
470 /*
471 * ISA specifies HV, but can be delivered to guest with HV
472 * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
473 */
474 new_msr |= (target_ulong)MSR_HVB;
475 } else {
476 if (msr_pow) {
477 cpu_abort(cs, "Trying to deliver power-saving system reset "
478 "exception %d with no HV support\n", excp);
479 }
480 }
481 ail = 0;
482 break;
483 case POWERPC_EXCP_DSEG: /* Data segment exception */
484 case POWERPC_EXCP_ISEG: /* Instruction segment exception */
485 case POWERPC_EXCP_TRACE: /* Trace exception */
486 break;
487 case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
488 case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
489 case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */
490 case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
491 case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */
492 case POWERPC_EXCP_SDOOR_HV: /* Hypervisor Doorbell interrupt */
493 case POWERPC_EXCP_HV_EMU:
494 case POWERPC_EXCP_HVIRT: /* Hypervisor virtualization */
495 srr0 = SPR_HSRR0;
496 srr1 = SPR_HSRR1;
497 new_msr |= (target_ulong)MSR_HVB;
498 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
499 break;
500 case POWERPC_EXCP_VPU: /* Vector unavailable exception */
501 case POWERPC_EXCP_VSXU: /* VSX unavailable exception */
502 case POWERPC_EXCP_FU: /* Facility unavailable exception */
503 #ifdef TARGET_PPC64
504 env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
505 #endif
506 break;
507 case POWERPC_EXCP_HV_FU: /* Hypervisor Facility Unavailable Exception */
508 #ifdef TARGET_PPC64
509 env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
510 srr0 = SPR_HSRR0;
511 srr1 = SPR_HSRR1;
512 new_msr |= (target_ulong)MSR_HVB;
513 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
514 #endif
515 break;
516 case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */
517 LOG_EXCP("PIT exception\n");
518 break;
519 case POWERPC_EXCP_IO: /* IO error exception */
520 /* XXX: TODO */
521 cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
522 break;
523 case POWERPC_EXCP_RUNM: /* Run mode exception */
524 /* XXX: TODO */
525 cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
526 break;
527 case POWERPC_EXCP_EMUL: /* Emulation trap exception */
528 /* XXX: TODO */
529 cpu_abort(cs, "602 emulation trap exception "
530 "is not implemented yet !\n");
531 break;
532 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
533 switch (excp_model) {
534 case POWERPC_EXCP_602:
535 case POWERPC_EXCP_603:
536 case POWERPC_EXCP_603E:
537 case POWERPC_EXCP_G2:
538 goto tlb_miss_tgpr;
539 case POWERPC_EXCP_7x5:
540 goto tlb_miss;
541 case POWERPC_EXCP_74xx:
542 goto tlb_miss_74xx;
543 default:
544 cpu_abort(cs, "Invalid instruction TLB miss exception\n");
545 break;
546 }
547 break;
548 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
549 switch (excp_model) {
550 case POWERPC_EXCP_602:
551 case POWERPC_EXCP_603:
552 case POWERPC_EXCP_603E:
553 case POWERPC_EXCP_G2:
554 goto tlb_miss_tgpr;
555 case POWERPC_EXCP_7x5:
556 goto tlb_miss;
557 case POWERPC_EXCP_74xx:
558 goto tlb_miss_74xx;
559 default:
560 cpu_abort(cs, "Invalid data load TLB miss exception\n");
561 break;
562 }
563 break;
564 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
565 switch (excp_model) {
566 case POWERPC_EXCP_602:
567 case POWERPC_EXCP_603:
568 case POWERPC_EXCP_603E:
569 case POWERPC_EXCP_G2:
570 tlb_miss_tgpr:
571 /* Swap temporary saved registers with GPRs */
572 if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
573 new_msr |= (target_ulong)1 << MSR_TGPR;
574 hreg_swap_gpr_tgpr(env);
575 }
576 goto tlb_miss;
577 case POWERPC_EXCP_7x5:
578 tlb_miss:
579 #if defined(DEBUG_SOFTWARE_TLB)
580 if (qemu_log_enabled()) {
581 const char *es;
582 target_ulong *miss, *cmp;
583 int en;
584
585 if (excp == POWERPC_EXCP_IFTLB) {
586 es = "I";
587 en = 'I';
588 miss = &env->spr[SPR_IMISS];
589 cmp = &env->spr[SPR_ICMP];
590 } else {
591 if (excp == POWERPC_EXCP_DLTLB) {
592 es = "DL";
593 } else {
594 es = "DS";
595 }
596 en = 'D';
597 miss = &env->spr[SPR_DMISS];
598 cmp = &env->spr[SPR_DCMP];
599 }
600 qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
601 TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
602 TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
603 env->spr[SPR_HASH1], env->spr[SPR_HASH2],
604 env->error_code);
605 }
606 #endif
607 msr |= env->crf[0] << 28;
608 msr |= env->error_code; /* key, D/I, S/L bits */
609             /* Set way using an LRU mechanism */
610 msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
611 break;
612 case POWERPC_EXCP_74xx:
613 tlb_miss_74xx:
614 #if defined(DEBUG_SOFTWARE_TLB)
615 if (qemu_log_enabled()) {
616 const char *es;
617 target_ulong *miss, *cmp;
618 int en;
619
620 if (excp == POWERPC_EXCP_IFTLB) {
621 es = "I";
622 en = 'I';
623 miss = &env->spr[SPR_TLBMISS];
624 cmp = &env->spr[SPR_PTEHI];
625 } else {
626 if (excp == POWERPC_EXCP_DLTLB) {
627 es = "DL";
628 } else {
629 es = "DS";
630 }
631 en = 'D';
632 miss = &env->spr[SPR_TLBMISS];
633 cmp = &env->spr[SPR_PTEHI];
634 }
635 qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
636 TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
637 env->error_code);
638 }
639 #endif
640 msr |= env->error_code; /* key bit */
641 break;
642 default:
643 cpu_abort(cs, "Invalid data store TLB miss exception\n");
644 break;
645 }
646 break;
647 case POWERPC_EXCP_FPA: /* Floating-point assist exception */
648 /* XXX: TODO */
649 cpu_abort(cs, "Floating point assist exception "
650 "is not implemented yet !\n");
651 break;
652 case POWERPC_EXCP_DABR: /* Data address breakpoint */
653 /* XXX: TODO */
654 cpu_abort(cs, "DABR exception is not implemented yet !\n");
655 break;
656 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
657 /* XXX: TODO */
658 cpu_abort(cs, "IABR exception is not implemented yet !\n");
659 break;
660 case POWERPC_EXCP_SMI: /* System management interrupt */
661 /* XXX: TODO */
662 cpu_abort(cs, "SMI exception is not implemented yet !\n");
663 break;
664 case POWERPC_EXCP_THERM: /* Thermal interrupt */
665 /* XXX: TODO */
666 cpu_abort(cs, "Thermal management exception "
667 "is not implemented yet !\n");
668 break;
669 case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
670 /* XXX: TODO */
671 cpu_abort(cs,
672 "Performance counter exception is not implemented yet !\n");
673 break;
674 case POWERPC_EXCP_VPUA: /* Vector assist exception */
675 /* XXX: TODO */
676 cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
677 break;
678 case POWERPC_EXCP_SOFTP: /* Soft patch exception */
679 /* XXX: TODO */
680 cpu_abort(cs,
681 "970 soft-patch exception is not implemented yet !\n");
682 break;
683 case POWERPC_EXCP_MAINT: /* Maintenance exception */
684 /* XXX: TODO */
685 cpu_abort(cs,
686 "970 maintenance exception is not implemented yet !\n");
687 break;
688 case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */
689 /* XXX: TODO */
690 cpu_abort(cs, "Maskable external exception "
691 "is not implemented yet !\n");
692 break;
693 case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */
694 /* XXX: TODO */
695 cpu_abort(cs, "Non maskable external exception "
696 "is not implemented yet !\n");
697 break;
698 default:
699 excp_invalid:
700 cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
701 break;
702 }
703
704 /* Save PC */
705 env->spr[srr0] = env->nip;
706
707 /* Save MSR */
708 env->spr[srr1] = msr;
709
710 /* Sanity check */
711 if (!(env->msr_mask & MSR_HVB)) {
712 if (new_msr & MSR_HVB) {
713 cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
714 "no HV support\n", excp);
715 }
716 if (srr0 == SPR_HSRR0) {
717 cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
718 "no HV support\n", excp);
719 }
720 }
721
722     /* If any alternate SRR registers are defined, duplicate the saved values */
723 if (asrr0 != -1) {
724 env->spr[asrr0] = env->spr[srr0];
725 }
726 if (asrr1 != -1) {
727 env->spr[asrr1] = env->spr[srr1];
728 }
729
730 /*
731      * Sort out the endianness of the interrupt; this differs depending
732      * on the CPU, the HV mode, etc.
733 */
734 #ifdef TARGET_PPC64
735 if (excp_model == POWERPC_EXCP_POWER7) {
736 if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
737 new_msr |= (target_ulong)1 << MSR_LE;
738 }
739 } else if (excp_model == POWERPC_EXCP_POWER8) {
740 if (new_msr & MSR_HVB) {
741 if (env->spr[SPR_HID0] & HID0_HILE) {
742 new_msr |= (target_ulong)1 << MSR_LE;
743 }
744 } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
745 new_msr |= (target_ulong)1 << MSR_LE;
746 }
747 } else if (excp_model == POWERPC_EXCP_POWER9) {
748 if (new_msr & MSR_HVB) {
749 if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
750 new_msr |= (target_ulong)1 << MSR_LE;
751 }
752 } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
753 new_msr |= (target_ulong)1 << MSR_LE;
754 }
755 } else if (msr_ile) {
756 new_msr |= (target_ulong)1 << MSR_LE;
757 }
758 #else
759 if (msr_ile) {
760 new_msr |= (target_ulong)1 << MSR_LE;
761 }
762 #endif
763
764 /* Jump to handler */
765 vector = env->excp_vectors[excp];
766 if (vector == (target_ulong)-1ULL) {
767 cpu_abort(cs, "Raised an exception without defined vector %d\n",
768 excp);
769 }
770 vector |= env->excp_prefix;
771
772 /*
773 * AIL only works if there is no HV transition and we are running
774 * with translations enabled
775 */
776 if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1) ||
777 ((new_msr & MSR_HVB) && !(msr & MSR_HVB))) {
778 ail = 0;
779 }
780 /* Handle AIL */
781 if (ail) {
782 new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
783 vector |= ppc_excp_vector_offset(cs, ail);
784 }
785
786 #if defined(TARGET_PPC64)
787 if (excp_model == POWERPC_EXCP_BOOKE) {
788 if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
789 /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
790 new_msr |= (target_ulong)1 << MSR_CM;
791 } else {
792 vector = (uint32_t)vector;
793 }
794 } else {
795 if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) {
796 vector = (uint32_t)vector;
797 } else {
798 new_msr |= (target_ulong)1 << MSR_SF;
799 }
800 }
801 #endif
802
803 powerpc_set_excp_state(cpu, vector, new_msr);
804 }
805
806 void ppc_cpu_do_interrupt(CPUState *cs)
807 {
808 PowerPCCPU *cpu = POWERPC_CPU(cs);
809 CPUPPCState *env = &cpu->env;
810
811 powerpc_excp(cpu, env->excp_model, cs->exception_index);
812 }
813
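/*
 * Deliver at most one pending hardware interrupt, in priority order:
 * reset and machine check first, then the hypervisor sources (HDEC,
 * HVIRT), external and critical inputs, and finally the asynchronous
 * sources that are gated on MSR:EE.
 */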
814 static void ppc_hw_interrupt(CPUPPCState *env)
815 {
816 PowerPCCPU *cpu = env_archcpu(env);
817 bool async_deliver;
818
819 /* External reset */
820 if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
821 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
822 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
823 return;
824 }
825 /* Machine check exception */
826 if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
827 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
828 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
829 return;
830 }
831 #if 0 /* TODO */
832 /* External debug exception */
833 if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
834 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
835 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
836 return;
837 }
838 #endif
839
840 /*
841      * For interrupts that gate on MSR:EE, we need to do something a
842      * bit more subtle, as we need to let them through even when EE is
843      * clear while coming out of some power management states (so that
844      * they can become a 0x100 system reset).
845 */
846 async_deliver = (msr_ee != 0) || env->resume_as_sreset;
847
848 /* Hypervisor decrementer exception */
849 if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
850 /* LPCR will be clear when not supported so this will work */
851 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
852 if ((async_deliver || msr_hv == 0) && hdice) {
853 /* HDEC clears on delivery */
854 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
855 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
856 return;
857 }
858 }
859
860 /* Hypervisor virtualization interrupt */
861 if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
862 /* LPCR will be clear when not supported so this will work */
863 bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
864 if ((async_deliver || msr_hv == 0) && hvice) {
865 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT);
866 return;
867 }
868 }
869
870 /* External interrupt can ignore MSR:EE under some circumstances */
871 if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
872 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
873 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
874 /* HEIC blocks delivery to the hypervisor */
875 if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
876 (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
877 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
878 return;
879 }
880 }
881 if (msr_ce != 0) {
882 /* External critical interrupt */
883 if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
884 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
885 return;
886 }
887 }
888 if (async_deliver != 0) {
889 /* Watchdog timer on embedded PowerPC */
890 if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
891 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
892 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
893 return;
894 }
895 if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
896 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
897 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
898 return;
899 }
900 /* Fixed interval timer on embedded PowerPC */
901 if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
902 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
903 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
904 return;
905 }
906 /* Programmable interval timer on embedded PowerPC */
907 if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
908 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
909 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
910 return;
911 }
912 /* Decrementer exception */
913 if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
914 if (ppc_decr_clear_on_delivery(env)) {
915 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
916 }
917 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
918 return;
919 }
920 if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
921 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
922 if (is_book3s_arch2x(env)) {
923 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR);
924 } else {
925 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
926 }
927 return;
928 }
929 if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
930 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
931 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV);
932 return;
933 }
934 if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
935 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
936 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
937 return;
938 }
939 /* Thermal interrupt */
940 if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
941 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
942 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
943 return;
944 }
945 }
946
947 if (env->resume_as_sreset) {
948 /*
949      * This is a bug! It means that has_work took us out of halt without
950 * anything to deliver while in a PM state that requires getting
951 * out via a 0x100
952 *
953 * This means we will incorrectly execute past the power management
954 * instruction instead of triggering a reset.
955 *
956      * It generally means a discrepancy between the wakeup conditions in
957      * the processor's has_work implementation and the logic in this function.
958 */
959 cpu_abort(env_cpu(env),
960 "Wakeup from PM state but interrupt Undelivered");
961 }
962 }
963
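/* Raise a system reset; a vector of -1 keeps the model's default reset vector */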
964 void ppc_cpu_do_system_reset(CPUState *cs, target_ulong vector)
965 {
966 PowerPCCPU *cpu = POWERPC_CPU(cs);
967 CPUPPCState *env = &cpu->env;
968
969 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
970 if (vector != -1) {
971 env->nip = vector;
972 }
973 }
974
975 void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
976 {
977 PowerPCCPU *cpu = POWERPC_CPU(cs);
978 CPUPPCState *env = &cpu->env;
979 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
980 target_ulong msr = 0;
981
982 /*
983      * Set MSR and NIP for the handler; SRR0/1, DAR and DSISR have already
984 * been set by KVM.
985 */
986 msr = (1ULL << MSR_ME);
987 msr |= env->msr & (1ULL << MSR_SF);
988 if (!(*pcc->interrupts_big_endian)(cpu)) {
989 msr |= (1ULL << MSR_LE);
990 }
991
992 powerpc_set_excp_state(cpu, vector, msr);
993 }
994 #endif /* !CONFIG_USER_ONLY */
995
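/*
 * cpu_exec_interrupt hook for TCG: try to deliver one pending hardware
 * interrupt and drop CPU_INTERRUPT_HARD once nothing remains pending.
 */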
996 bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
997 {
998 PowerPCCPU *cpu = POWERPC_CPU(cs);
999 CPUPPCState *env = &cpu->env;
1000
1001 if (interrupt_request & CPU_INTERRUPT_HARD) {
1002 ppc_hw_interrupt(env);
1003 if (env->pending_interrupts == 0) {
1004 cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
1005 }
1006 return true;
1007 }
1008 return false;
1009 }
1010
1011 #if defined(DEBUG_OP)
1012 static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
1013 {
1014 qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
1015 TARGET_FMT_lx "\n", RA, msr);
1016 }
1017 #endif
1018
1019 /*****************************************************************************/
1020 /* Exceptions processing helpers */
1021
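/*
 * Record an exception and exit the CPU loop; "raddr" is the host return
 * address that cpu_loop_exit_restore() uses to rebuild the guest state
 * of the faulting instruction.
 */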
1022 void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
1023 uint32_t error_code, uintptr_t raddr)
1024 {
1025 CPUState *cs = env_cpu(env);
1026
1027 cs->exception_index = exception;
1028 env->error_code = error_code;
1029 cpu_loop_exit_restore(cs, raddr);
1030 }
1031
1032 void raise_exception_err(CPUPPCState *env, uint32_t exception,
1033 uint32_t error_code)
1034 {
1035 raise_exception_err_ra(env, exception, error_code, 0);
1036 }
1037
1038 void raise_exception(CPUPPCState *env, uint32_t exception)
1039 {
1040 raise_exception_err_ra(env, exception, 0, 0);
1041 }
1042
1043 void raise_exception_ra(CPUPPCState *env, uint32_t exception,
1044 uintptr_t raddr)
1045 {
1046 raise_exception_err_ra(env, exception, 0, raddr);
1047 }
1048
1049 void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
1050 uint32_t error_code)
1051 {
1052 raise_exception_err_ra(env, exception, error_code, 0);
1053 }
1054
1055 void helper_raise_exception(CPUPPCState *env, uint32_t exception)
1056 {
1057 raise_exception_err_ra(env, exception, 0, 0);
1058 }
1059
1060 #if !defined(CONFIG_USER_ONLY)
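/*
 * hreg_store_msr() returns a non-zero exception number when the MSR
 * update itself must raise one (typically when setting MSR:POW puts the
 * CPU to sleep); in that case exit the current TB and deliver it.
 */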
1061 void helper_store_msr(CPUPPCState *env, target_ulong val)
1062 {
1063 uint32_t excp = hreg_store_msr(env, val, 0);
1064
1065 if (excp != 0) {
1066 CPUState *cs = env_cpu(env);
1067 cpu_interrupt_exittb(cs);
1068 raise_exception(env, excp);
1069 }
1070 }
1071
1072 #if defined(TARGET_PPC64)
1073 void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
1074 {
1075 CPUState *cs;
1076
1077 cs = env_cpu(env);
1078 cs->halted = 1;
1079
1080 /*
1081 * The architecture specifies that HDEC interrupts are discarded
1082 * in PM states
1083 */
1084 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
1085
1086 /* Condition for waking up at 0x100 */
1087 env->resume_as_sreset = (insn != PPC_PM_STOP) ||
1088 (env->spr[SPR_PSSCR] & PSSCR_EC);
1089 }
1090 #endif /* defined(TARGET_PPC64) */
1091
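/*
 * Common return-from-interrupt path: restore NIP and MSR from the given
 * save/restore values and resynchronize the execution state.
 */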
1092 static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
1093 {
1094 CPUState *cs = env_cpu(env);
1095
1096 /* MSR:POW cannot be set by any form of rfi */
1097 msr &= ~(1ULL << MSR_POW);
1098
1099 #if defined(TARGET_PPC64)
1100     /* Switching to 32-bit? Crop the nip */
1101 if (!msr_is_64bit(env, msr)) {
1102 nip = (uint32_t)nip;
1103 }
1104 #else
1105 nip = (uint32_t)nip;
1106 #endif
1107 /* XXX: beware: this is false if VLE is supported */
1108 env->nip = nip & ~((target_ulong)0x00000003);
1109 hreg_store_msr(env, msr, 1);
1110 #if defined(DEBUG_OP)
1111 cpu_dump_rfi(env->nip, env->msr);
1112 #endif
1113 /*
1114 * No need to raise an exception here, as rfi is always the last
1115 * insn of a TB
1116 */
1117 cpu_interrupt_exittb(cs);
1118 /* Reset the reservation */
1119 env->reserve_addr = -1;
1120
1121 /* Context synchronizing: check if TCG TLB needs flush */
1122 check_tlb_flush(env, false);
1123 }
1124
1125 void helper_rfi(CPUPPCState *env)
1126 {
1127 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
1128 }
1129
1131 #if defined(TARGET_PPC64)
1132 void helper_rfid(CPUPPCState *env)
1133 {
1134 /*
1135      * The architecture defines a number of rules for which bits can
1136 * change but in practice, we handle this in hreg_store_msr()
1137 * which will be called by do_rfi(), so there is no need to filter
1138 * here
1139 */
1140 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
1141 }
1142
1143 void helper_hrfid(CPUPPCState *env)
1144 {
1145 do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
1146 }
1147 #endif
1148
1149 /*****************************************************************************/
1150 /* Embedded PowerPC specific helpers */
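/*
 * Each rfci/rfdi/rfmci variant returns from its interrupt class,
 * restoring NIP and MSR from the matching save/restore register pair.
 */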
1151 void helper_40x_rfci(CPUPPCState *env)
1152 {
1153 do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
1154 }
1155
1156 void helper_rfci(CPUPPCState *env)
1157 {
1158 do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
1159 }
1160
1161 void helper_rfdi(CPUPPCState *env)
1162 {
1163 /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
1164 do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
1165 }
1166
1167 void helper_rfmci(CPUPPCState *env)
1168 {
1169 /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
1170 do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
1171 }
1172 #endif
1173
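/*
 * Trap helpers (tw/td): "flags" is the TO field of the instruction.
 * 0x10 = signed less-than, 0x08 = signed greater-than, 0x04 = equal,
 * 0x02 = unsigned less-than, 0x01 = unsigned greater-than; a trap is
 * taken if any selected condition holds.
 */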
1174 void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
1175 uint32_t flags)
1176 {
1177 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1178 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1179 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1180 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1181 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1182 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
1183 POWERPC_EXCP_TRAP, GETPC());
1184 }
1185 }
1186
1187 #if defined(TARGET_PPC64)
1188 void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
1189 uint32_t flags)
1190 {
1191 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1192 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1193 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1194 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1195 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
1196 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
1197 POWERPC_EXCP_TRAP, GETPC());
1198 }
1199 }
1200 #endif
1201
1202 #if !defined(CONFIG_USER_ONLY)
1203 /*****************************************************************************/
1204 /* PowerPC 601 specific instructions (POWER bridge) */
1205
1206 void helper_rfsvc(CPUPPCState *env)
1207 {
1208 do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
1209 }
1210
1211 /* Embedded.Processor Control */
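/* Map the msgsnd/msgclr doorbell type encoded in rB to a pending-interrupt bit */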
1212 static int dbell2irq(target_ulong rb)
1213 {
1214 int msg = rb & DBELL_TYPE_MASK;
1215 int irq = -1;
1216
1217 switch (msg) {
1218 case DBELL_TYPE_DBELL:
1219 irq = PPC_INTERRUPT_DOORBELL;
1220 break;
1221 case DBELL_TYPE_DBELL_CRIT:
1222 irq = PPC_INTERRUPT_CDOORBELL;
1223 break;
1224 case DBELL_TYPE_G_DBELL:
1225 case DBELL_TYPE_G_DBELL_CRIT:
1226 case DBELL_TYPE_G_DBELL_MC:
1227 /* XXX implement */
1228 default:
1229 break;
1230 }
1231
1232 return irq;
1233 }
1234
1235 void helper_msgclr(CPUPPCState *env, target_ulong rb)
1236 {
1237 int irq = dbell2irq(rb);
1238
1239 if (irq < 0) {
1240 return;
1241 }
1242
1243 env->pending_interrupts &= ~(1 << irq);
1244 }
1245
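/*
 * Deliver a doorbell to every CPU whose PIR matches the tag in rB (or to
 * all CPUs on a broadcast) and kick it with CPU_INTERRUPT_HARD.
 */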
1246 void helper_msgsnd(target_ulong rb)
1247 {
1248 int irq = dbell2irq(rb);
1249 int pir = rb & DBELL_PIRTAG_MASK;
1250 CPUState *cs;
1251
1252 if (irq < 0) {
1253 return;
1254 }
1255
1256 qemu_mutex_lock_iothread();
1257 CPU_FOREACH(cs) {
1258 PowerPCCPU *cpu = POWERPC_CPU(cs);
1259 CPUPPCState *cenv = &cpu->env;
1260
1261 if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
1262 cenv->pending_interrupts |= 1 << irq;
1263 cpu_interrupt(cs, CPU_INTERRUPT_HARD);
1264 }
1265 }
1266 qemu_mutex_unlock_iothread();
1267 }
1268
1269 /* Server Processor Control */
1270
1271 static bool dbell_type_server(target_ulong rb)
1272 {
1273 /*
1274 * A Directed Hypervisor Doorbell message is sent only if the
1275 * message type is 5. All other types are reserved and the
1276 * instruction is a no-op
1277 */
1278 return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
1279 }
1280
1281 void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
1282 {
1283 if (!dbell_type_server(rb)) {
1284 return;
1285 }
1286
1287 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
1288 }
1289
1290 static void book3s_msgsnd_common(int pir, int irq)
1291 {
1292 CPUState *cs;
1293
1294 qemu_mutex_lock_iothread();
1295 CPU_FOREACH(cs) {
1296 PowerPCCPU *cpu = POWERPC_CPU(cs);
1297 CPUPPCState *cenv = &cpu->env;
1298
1299 /* TODO: broadcast message to all threads of the same processor */
1300 if (cenv->spr_cb[SPR_PIR].default_value == pir) {
1301 cenv->pending_interrupts |= 1 << irq;
1302 cpu_interrupt(cs, CPU_INTERRUPT_HARD);
1303 }
1304 }
1305 qemu_mutex_unlock_iothread();
1306 }
1307
1308 void helper_book3s_msgsnd(target_ulong rb)
1309 {
1310 int pir = rb & DBELL_PROCIDTAG_MASK;
1311
1312 if (!dbell_type_server(rb)) {
1313 return;
1314 }
1315
1316 book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
1317 }
1318
1319 #if defined(TARGET_PPC64)
1320 void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
1321 {
1322 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
1323
1324 if (!dbell_type_server(rb)) {
1325 return;
1326 }
1327
1328 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
1329 }
1330
1331 /*
1332  * Send a message to the other threads on the same
1333  * multi-threaded processor
1334 */
1335 void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
1336 {
1337 int pir = env->spr_cb[SPR_PIR].default_value;
1338
1339 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
1340
1341 if (!dbell_type_server(rb)) {
1342 return;
1343 }
1344
1345 /* TODO: TCG supports only one thread */
1346
1347 book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
1348 }
1349 #endif
1350 #endif
1351
1352 void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
1353 MMUAccessType access_type,
1354 int mmu_idx, uintptr_t retaddr)
1355 {
1356 CPUPPCState *env = cs->env_ptr;
1357 uint32_t insn;
1358
1359 /* Restore state and reload the insn we executed, for filling in DSISR. */
1360 cpu_restore_state(cs, retaddr, true);
1361 insn = cpu_ldl_code(env, env->nip);
1362
1363 cs->exception_index = POWERPC_EXCP_ALIGN;
1364 env->error_code = insn & 0x03FF0000;
1365 cpu_loop_exit(cs);
1366 }