1 /*
2 * PowerPC exception emulation helpers for QEMU.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/main-loop.h"
21 #include "cpu.h"
22 #include "exec/helper-proto.h"
23 #include "exec/exec-all.h"
24 #include "exec/cpu_ldst.h"
25 #include "internal.h"
26 #include "helper_regs.h"
27
28 //#define DEBUG_OP
29 //#define DEBUG_SOFTWARE_TLB
30 //#define DEBUG_EXCEPTIONS
31
32 #ifdef DEBUG_EXCEPTIONS
33 # define LOG_EXCP(...) qemu_log(__VA_ARGS__)
34 #else
35 # define LOG_EXCP(...) do { } while (0)
36 #endif
37
38 /*****************************************************************************/
39 /* Exception processing */
40 #if defined(CONFIG_USER_ONLY)
41 void ppc_cpu_do_interrupt(CPUState *cs)
42 {
43 PowerPCCPU *cpu = POWERPC_CPU(cs);
44 CPUPPCState *env = &cpu->env;
45
46 cs->exception_index = POWERPC_EXCP_NONE;
47 env->error_code = 0;
48 }
49
50 static void ppc_hw_interrupt(CPUPPCState *env)
51 {
52 CPUState *cs = CPU(ppc_env_get_cpu(env));
53
54 cs->exception_index = POWERPC_EXCP_NONE;
55 env->error_code = 0;
56 }
57 #else /* defined(CONFIG_USER_ONLY) */
58 static inline void dump_syscall(CPUPPCState *env)
59 {
60 qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 " r3=%016" PRIx64
61 " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
62 " nip=" TARGET_FMT_lx "\n",
63 ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
64 ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
65 ppc_dump_gpr(env, 6), env->nip);
66 }
67
68 static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
69 target_ulong *msr)
70 {
71 /* We are no longer in a PM state */
72 env->resume_as_sreset = false;
73
74 /* Always pretend to be returning from doze, as we don't lose state */
75 *msr |= (0x1ull << (63 - 47));
76
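/*
 * The (63 - N) shifts above and below use IBM (MSB = 0) bit numbering;
 * the codes OR'ed in by the switch below form the SRR1 wake reason
 * field, SRR1[42:45].
 */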
77 /* Machine checks are sent normally */
78 if (excp == POWERPC_EXCP_MCHECK) {
79 return excp;
80 }
81 switch (excp) {
82 case POWERPC_EXCP_RESET:
83 *msr |= 0x4ull << (63 - 45);
84 break;
85 case POWERPC_EXCP_EXTERNAL:
86 *msr |= 0x8ull << (63 - 45);
87 break;
88 case POWERPC_EXCP_DECR:
89 *msr |= 0x6ull << (63 - 45);
90 break;
91 case POWERPC_EXCP_SDOOR:
92 *msr |= 0x5ull << (63 - 45);
93 break;
94 case POWERPC_EXCP_SDOOR_HV:
95 *msr |= 0x3ull << (63 - 45);
96 break;
97 case POWERPC_EXCP_HV_MAINT:
98 *msr |= 0xaull << (63 - 45);
99 break;
100 case POWERPC_EXCP_HVIRT:
101 *msr |= 0x9ull << (63 - 45);
102 break;
103 default:
104 cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
105 excp);
106 }
107 return POWERPC_EXCP_RESET;
108 }
109
110
111 /* Note that this function should be greatly optimized
112 * when called with a constant excp, from ppc_hw_interrupt
113 */
114 static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
115 {
116 CPUState *cs = CPU(cpu);
117 CPUPPCState *env = &cpu->env;
118 target_ulong msr, new_msr, vector;
119 int srr0, srr1, asrr0, asrr1, lev, ail;
120 bool lpes0;
121
122 qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
123 " => %08x (%02x)\n", env->nip, excp, env->error_code);
124
125 /* new srr1 value excluding must-be-zero bits */
126 if (excp_model == POWERPC_EXCP_BOOKE) {
127 msr = env->msr;
128 } else {
129 msr = env->msr & ~0x783f0000ULL;
130 }
131
132 /* new interrupt handler msr preserves existing HV and ME unless
133 * explicitly overridden
134 */
135 new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);
136
137 /* target registers */
138 srr0 = SPR_SRR0;
139 srr1 = SPR_SRR1;
140 asrr0 = -1;
141 asrr1 = -1;
142
143 /*
144 * check for special resume at 0x100 from doze/nap/sleep/winkle on
145 * P7/P8/P9
146 */
147 if (env->resume_as_sreset) {
148 excp = powerpc_reset_wakeup(cs, env, excp, &msr);
149 }
150
151 /* Exception targeting modifiers
152 *
153 * LPES0 is supported on POWER7/8/9
154 * LPES1 is not supported (old iSeries mode)
155 *
156 * On anything else, we behave as if LPES0 is 1
157 * (externals don't alter MSR:HV)
158 *
159 * AIL is initialized here but can be cleared by
160 * selected exceptions
161 */
162 #if defined(TARGET_PPC64)
163 if (excp_model == POWERPC_EXCP_POWER7 ||
164 excp_model == POWERPC_EXCP_POWER8 ||
165 excp_model == POWERPC_EXCP_POWER9) {
166 lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
167 if (excp_model != POWERPC_EXCP_POWER7) {
168 ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
169 } else {
170 ail = 0;
171 }
172 } else
173 #endif /* defined(TARGET_PPC64) */
174 {
175 lpes0 = true;
176 ail = 0;
177 }
178
179 /* Hypervisor emulation assistance interrupt only exists on server
180 * arch 2.05 or later. We also don't want to generate it if
181 * we don't have HVB in msr_mask (PAPR mode).
182 */
183 if (excp == POWERPC_EXCP_HV_EMU
184 #if defined(TARGET_PPC64)
185 && !((env->mmu_model & POWERPC_MMU_64) && (env->msr_mask & MSR_HVB))
186 #endif /* defined(TARGET_PPC64) */
187
188 ) {
189 excp = POWERPC_EXCP_PROGRAM;
190 }
191
192 switch (excp) {
193 case POWERPC_EXCP_NONE:
194 /* Should never happen */
195 return;
196 case POWERPC_EXCP_CRITICAL: /* Critical input */
197 switch (excp_model) {
198 case POWERPC_EXCP_40x:
199 srr0 = SPR_40x_SRR2;
200 srr1 = SPR_40x_SRR3;
201 break;
202 case POWERPC_EXCP_BOOKE:
203 srr0 = SPR_BOOKE_CSRR0;
204 srr1 = SPR_BOOKE_CSRR1;
205 break;
206 case POWERPC_EXCP_G2:
207 break;
208 default:
209 goto excp_invalid;
210 }
211 break;
212 case POWERPC_EXCP_MCHECK: /* Machine check exception */
213 if (msr_me == 0) {
214 /* Machine check exception is not enabled.
215 * Enter checkstop state.
216 */
217 fprintf(stderr, "Machine check while not allowed. "
218 "Entering checkstop state\n");
219 if (qemu_log_separate()) {
220 qemu_log("Machine check while not allowed. "
221 "Entering checkstop state\n");
222 }
223 cs->halted = 1;
224 cpu_interrupt_exittb(cs);
225 }
226 if (env->msr_mask & MSR_HVB) {
227 /* ISA specifies HV, but can be delivered to guest with HV clear
228 * (e.g., see FWNMI in PAPR).
229 */
230 new_msr |= (target_ulong)MSR_HVB;
231 }
232 ail = 0;
233
234 /* machine check exceptions don't have ME set */
235 new_msr &= ~((target_ulong)1 << MSR_ME);
236
237 /* XXX: should also have something loaded in DAR / DSISR */
238 switch (excp_model) {
239 case POWERPC_EXCP_40x:
240 srr0 = SPR_40x_SRR2;
241 srr1 = SPR_40x_SRR3;
242 break;
243 case POWERPC_EXCP_BOOKE:
244 /* FIXME: choose one or the other based on CPU type */
245 srr0 = SPR_BOOKE_MCSRR0;
246 srr1 = SPR_BOOKE_MCSRR1;
247 asrr0 = SPR_BOOKE_CSRR0;
248 asrr1 = SPR_BOOKE_CSRR1;
249 break;
250 default:
251 break;
252 }
253 break;
254 case POWERPC_EXCP_DSI: /* Data storage exception */
255 LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx
256 "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
257 break;
258 case POWERPC_EXCP_ISI: /* Instruction storage exception */
259 LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
260 "\n", msr, env->nip);
261 msr |= env->error_code;
262 break;
263 case POWERPC_EXCP_EXTERNAL: /* External input */
264 cs = CPU(cpu);
265
266 if (!lpes0) {
267 new_msr |= (target_ulong)MSR_HVB;
268 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
269 srr0 = SPR_HSRR0;
270 srr1 = SPR_HSRR1;
271 }
272 if (env->mpic_proxy) {
273 /* IACK the IRQ on delivery */
274 env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
275 }
276 break;
277 case POWERPC_EXCP_ALIGN: /* Alignment exception */
278 /* Get rS/rD and rA from faulting opcode */
279 /* Note: the opcode fields will not be set properly for a direct
280 * store load/store, but nobody cares as nobody actually uses
281 * direct store segments.
282 */
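/*
 * error_code was set to (insn & 0x03FF0000) by
 * ppc_cpu_do_unaligned_access(), so the shift below drops the RS/RD
 * and RA instruction fields into the low bits of DSISR.
 */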
283 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
284 break;
285 case POWERPC_EXCP_PROGRAM: /* Program exception */
286 switch (env->error_code & ~0xF) {
287 case POWERPC_EXCP_FP:
288 if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
289 LOG_EXCP("Ignore floating point exception\n");
290 cs->exception_index = POWERPC_EXCP_NONE;
291 env->error_code = 0;
292 return;
293 }
294
295 /* FP exceptions always have NIP pointing to the faulting
296 * instruction, so always use store_next and claim we are
297 * precise in the MSR.
298 */
299 msr |= 0x00100000;
300 env->spr[SPR_BOOKE_ESR] = ESR_FP;
301 break;
302 case POWERPC_EXCP_INVAL:
303 LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
304 msr |= 0x00080000;
305 env->spr[SPR_BOOKE_ESR] = ESR_PIL;
306 break;
307 case POWERPC_EXCP_PRIV:
308 msr |= 0x00040000;
309 env->spr[SPR_BOOKE_ESR] = ESR_PPR;
310 break;
311 case POWERPC_EXCP_TRAP:
312 msr |= 0x00020000;
313 env->spr[SPR_BOOKE_ESR] = ESR_PTR;
314 break;
315 default:
316 /* Should never occur */
317 cpu_abort(cs, "Invalid program exception %d. Aborting\n",
318 env->error_code);
319 break;
320 }
321 break;
322 case POWERPC_EXCP_SYSCALL: /* System call exception */
323 dump_syscall(env);
324 lev = env->error_code;
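/* lev is the LEV field of the sc instruction;
 * lev == 1 ("sc 1") requests a hypervisor call, handled below */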
325
326 /* We need to correct the NIP which in this case is supposed
327 * to point to the next instruction
328 */
329 env->nip += 4;
330
331 /* "PAPR mode" built-in hypercall emulation */
332 if ((lev == 1) && cpu->vhyp) {
333 PPCVirtualHypervisorClass *vhc =
334 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
335 vhc->hypercall(cpu->vhyp, cpu);
336 return;
337 }
338 if (lev == 1) {
339 new_msr |= (target_ulong)MSR_HVB;
340 }
341 break;
342 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
343 case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
344 case POWERPC_EXCP_DECR: /* Decrementer exception */
345 break;
346 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
347 /* FIT on 4xx */
348 LOG_EXCP("FIT exception\n");
349 break;
350 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
351 LOG_EXCP("WDT exception\n");
352 switch (excp_model) {
353 case POWERPC_EXCP_BOOKE:
354 srr0 = SPR_BOOKE_CSRR0;
355 srr1 = SPR_BOOKE_CSRR1;
356 break;
357 default:
358 break;
359 }
360 break;
361 case POWERPC_EXCP_DTLB: /* Data TLB error */
362 case POWERPC_EXCP_ITLB: /* Instruction TLB error */
363 break;
364 case POWERPC_EXCP_DEBUG: /* Debug interrupt */
365 if (env->flags & POWERPC_FLAG_DE) {
366 /* FIXME: choose one or the other based on CPU type */
367 srr0 = SPR_BOOKE_DSRR0;
368 srr1 = SPR_BOOKE_DSRR1;
369 asrr0 = SPR_BOOKE_CSRR0;
370 asrr1 = SPR_BOOKE_CSRR1;
371 /* DBSR already modified by caller */
372 } else {
373 cpu_abort(cs, "Debug exception triggered on unsupported model\n");
374 }
375 break;
376 case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */
377 env->spr[SPR_BOOKE_ESR] = ESR_SPV;
378 break;
379 case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */
380 /* XXX: TODO */
381 cpu_abort(cs, "Embedded floating point data exception "
382 "is not implemented yet !\n");
383 env->spr[SPR_BOOKE_ESR] = ESR_SPV;
384 break;
385 case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */
386 /* XXX: TODO */
387 cpu_abort(cs, "Embedded floating point round exception "
388 "is not implemented yet !\n");
389 env->spr[SPR_BOOKE_ESR] = ESR_SPV;
390 break;
391 case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */
392 /* XXX: TODO */
393 cpu_abort(cs,
394 "Performance counter exception is not implemented yet !\n");
395 break;
396 case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
397 break;
398 case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
399 srr0 = SPR_BOOKE_CSRR0;
400 srr1 = SPR_BOOKE_CSRR1;
401 break;
402 case POWERPC_EXCP_RESET: /* System reset exception */
403 /* A power-saving exception sets ME, otherwise it is unchanged */
404 if (msr_pow) {
405 /* indicate that we resumed from power save mode */
406 msr |= 0x10000;
407 new_msr |= ((target_ulong)1 << MSR_ME);
408 }
409 if (env->msr_mask & MSR_HVB) {
410 /* ISA specifies HV, but can be delivered to guest with HV clear
411 * (e.g., see FWNMI in PAPR, NMI injection in QEMU).
412 */
413 new_msr |= (target_ulong)MSR_HVB;
414 } else {
415 if (msr_pow) {
416 cpu_abort(cs, "Trying to deliver power-saving system reset "
417 "exception %d with no HV support\n", excp);
418 }
419 }
420 ail = 0;
421 break;
422 case POWERPC_EXCP_DSEG: /* Data segment exception */
423 case POWERPC_EXCP_ISEG: /* Instruction segment exception */
424 case POWERPC_EXCP_TRACE: /* Trace exception */
425 break;
426 case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
427 case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
428 case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */
429 case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
430 case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */
431 case POWERPC_EXCP_SDOOR_HV: /* Hypervisor Doorbell interrupt */
432 case POWERPC_EXCP_HV_EMU:
433 case POWERPC_EXCP_HVIRT: /* Hypervisor virtualization */
434 srr0 = SPR_HSRR0;
435 srr1 = SPR_HSRR1;
436 new_msr |= (target_ulong)MSR_HVB;
437 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
438 break;
439 case POWERPC_EXCP_VPU: /* Vector unavailable exception */
440 case POWERPC_EXCP_VSXU: /* VSX unavailable exception */
441 case POWERPC_EXCP_FU: /* Facility unavailable exception */
442 #ifdef TARGET_PPC64
443 env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
444 #endif
445 break;
446 case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */
447 LOG_EXCP("PIT exception\n");
448 break;
449 case POWERPC_EXCP_IO: /* IO error exception */
450 /* XXX: TODO */
451 cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
452 break;
453 case POWERPC_EXCP_RUNM: /* Run mode exception */
454 /* XXX: TODO */
455 cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
456 break;
457 case POWERPC_EXCP_EMUL: /* Emulation trap exception */
458 /* XXX: TODO */
459 cpu_abort(cs, "602 emulation trap exception "
460 "is not implemented yet !\n");
461 break;
462 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
463 switch (excp_model) {
464 case POWERPC_EXCP_602:
465 case POWERPC_EXCP_603:
466 case POWERPC_EXCP_603E:
467 case POWERPC_EXCP_G2:
468 goto tlb_miss_tgpr;
469 case POWERPC_EXCP_7x5:
470 goto tlb_miss;
471 case POWERPC_EXCP_74xx:
472 goto tlb_miss_74xx;
473 default:
474 cpu_abort(cs, "Invalid instruction TLB miss exception\n");
475 break;
476 }
477 break;
478 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
479 switch (excp_model) {
480 case POWERPC_EXCP_602:
481 case POWERPC_EXCP_603:
482 case POWERPC_EXCP_603E:
483 case POWERPC_EXCP_G2:
484 goto tlb_miss_tgpr;
485 case POWERPC_EXCP_7x5:
486 goto tlb_miss;
487 case POWERPC_EXCP_74xx:
488 goto tlb_miss_74xx;
489 default:
490 cpu_abort(cs, "Invalid data load TLB miss exception\n");
491 break;
492 }
493 break;
494 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
495 switch (excp_model) {
496 case POWERPC_EXCP_602:
497 case POWERPC_EXCP_603:
498 case POWERPC_EXCP_603E:
499 case POWERPC_EXCP_G2:
500 tlb_miss_tgpr:
501 /* Swap temporary saved registers with GPRs */
502 if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
503 new_msr |= (target_ulong)1 << MSR_TGPR;
504 hreg_swap_gpr_tgpr(env);
505 }
506 goto tlb_miss;
507 case POWERPC_EXCP_7x5:
508 tlb_miss:
509 #if defined(DEBUG_SOFTWARE_TLB)
510 if (qemu_log_enabled()) {
511 const char *es;
512 target_ulong *miss, *cmp;
513 int en;
514
515 if (excp == POWERPC_EXCP_IFTLB) {
516 es = "I";
517 en = 'I';
518 miss = &env->spr[SPR_IMISS];
519 cmp = &env->spr[SPR_ICMP];
520 } else {
521 if (excp == POWERPC_EXCP_DLTLB) {
522 es = "DL";
523 } else {
524 es = "DS";
525 }
526 en = 'D';
527 miss = &env->spr[SPR_DMISS];
528 cmp = &env->spr[SPR_DCMP];
529 }
530 qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
531 TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
532 TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
533 env->spr[SPR_HASH1], env->spr[SPR_HASH2],
534 env->error_code);
535 }
536 #endif
537 msr |= env->crf[0] << 28;
538 msr |= env->error_code; /* key, D/I, S/L bits */
539 /* Set way using a LRU mechanism */
540 msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
541 break;
542 case POWERPC_EXCP_74xx:
543 tlb_miss_74xx:
544 #if defined(DEBUG_SOFTWARE_TLB)
545 if (qemu_log_enabled()) {
546 const char *es;
547 target_ulong *miss, *cmp;
548 int en;
549
550 if (excp == POWERPC_EXCP_IFTLB) {
551 es = "I";
552 en = 'I';
553 miss = &env->spr[SPR_TLBMISS];
554 cmp = &env->spr[SPR_PTEHI];
555 } else {
556 if (excp == POWERPC_EXCP_DLTLB) {
557 es = "DL";
558 } else {
559 es = "DS";
560 }
561 en = 'D';
562 miss = &env->spr[SPR_TLBMISS];
563 cmp = &env->spr[SPR_PTEHI];
564 }
565 qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
566 TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
567 env->error_code);
568 }
569 #endif
570 msr |= env->error_code; /* key bit */
571 break;
572 default:
573 cpu_abort(cs, "Invalid data store TLB miss exception\n");
574 break;
575 }
576 break;
577 case POWERPC_EXCP_FPA: /* Floating-point assist exception */
578 /* XXX: TODO */
579 cpu_abort(cs, "Floating point assist exception "
580 "is not implemented yet !\n");
581 break;
582 case POWERPC_EXCP_DABR: /* Data address breakpoint */
583 /* XXX: TODO */
584 cpu_abort(cs, "DABR exception is not implemented yet !\n");
585 break;
586 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
587 /* XXX: TODO */
588 cpu_abort(cs, "IABR exception is not implemented yet !\n");
589 break;
590 case POWERPC_EXCP_SMI: /* System management interrupt */
591 /* XXX: TODO */
592 cpu_abort(cs, "SMI exception is not implemented yet !\n");
593 break;
594 case POWERPC_EXCP_THERM: /* Thermal interrupt */
595 /* XXX: TODO */
596 cpu_abort(cs, "Thermal management exception "
597 "is not implemented yet !\n");
598 break;
599 case POWERPC_EXCP_PERFM: /* Performance monitor interrupt */
600 /* XXX: TODO */
601 cpu_abort(cs,
602 "Performance counter exception is not implemented yet !\n");
603 break;
604 case POWERPC_EXCP_VPUA: /* Vector assist exception */
605 /* XXX: TODO */
606 cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
607 break;
608 case POWERPC_EXCP_SOFTP: /* Soft patch exception */
609 /* XXX: TODO */
610 cpu_abort(cs,
611 "970 soft-patch exception is not implemented yet !\n");
612 break;
613 case POWERPC_EXCP_MAINT: /* Maintenance exception */
614 /* XXX: TODO */
615 cpu_abort(cs,
616 "970 maintenance exception is not implemented yet !\n");
617 break;
618 case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */
619 /* XXX: TODO */
620 cpu_abort(cs, "Maskable external exception "
621 "is not implemented yet !\n");
622 break;
623 case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */
624 /* XXX: TODO */
625 cpu_abort(cs, "Non maskable external exception "
626 "is not implemented yet !\n");
627 break;
628 default:
629 excp_invalid:
630 cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
631 break;
632 }
633
634 /* Save PC */
635 env->spr[srr0] = env->nip;
636
637 /* Save MSR */
638 env->spr[srr1] = msr;
639
640 /* Sanity check */
641 if (!(env->msr_mask & MSR_HVB)) {
642 if (new_msr & MSR_HVB) {
643 cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
644 "no HV support\n", excp);
645 }
646 if (srr0 == SPR_HSRR0) {
647 cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
648 "no HV support\n", excp);
649 }
650 }
651
652 /* If any alternate SRR registers are defined, duplicate the saved values */
653 if (asrr0 != -1) {
654 env->spr[asrr0] = env->spr[srr0];
655 }
656 if (asrr1 != -1) {
657 env->spr[asrr1] = env->spr[srr1];
658 }
659
660 /* Sort out the endianness of the interrupt; this differs depending on the
661 * CPU, the HV mode, etc.
662 */
663 #ifdef TARGET_PPC64
664 if (excp_model == POWERPC_EXCP_POWER7) {
665 if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
666 new_msr |= (target_ulong)1 << MSR_LE;
667 }
668 } else if (excp_model == POWERPC_EXCP_POWER8) {
669 if (new_msr & MSR_HVB) {
670 if (env->spr[SPR_HID0] & HID0_HILE) {
671 new_msr |= (target_ulong)1 << MSR_LE;
672 }
673 } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
674 new_msr |= (target_ulong)1 << MSR_LE;
675 }
676 } else if (excp_model == POWERPC_EXCP_POWER9) {
677 if (new_msr & MSR_HVB) {
678 if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
679 new_msr |= (target_ulong)1 << MSR_LE;
680 }
681 } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
682 new_msr |= (target_ulong)1 << MSR_LE;
683 }
684 } else if (msr_ile) {
685 new_msr |= (target_ulong)1 << MSR_LE;
686 }
687 #else
688 if (msr_ile) {
689 new_msr |= (target_ulong)1 << MSR_LE;
690 }
691 #endif
692
693 /* Jump to handler */
694 vector = env->excp_vectors[excp];
695 if (vector == (target_ulong)-1ULL) {
696 cpu_abort(cs, "Raised an exception without defined vector %d\n",
697 excp);
698 }
699 vector |= env->excp_prefix;
700
701 /* AIL only works if there is no HV transition and we are running with
702 * translations enabled
703 */
704 if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1) ||
705 ((new_msr & MSR_HVB) && !(msr & MSR_HVB))) {
706 ail = 0;
707 }
708 /* Handle AIL */
709 if (ail) {
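/* AIL delivery runs the handler with relocation on at the alternate location */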
710 new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
711 switch (ail) {
712 case AIL_0001_8000:
713 vector |= 0x18000;
714 break;
715 case AIL_C000_0000_0000_4000:
716 vector |= 0xc000000000004000ull;
717 break;
718 default:
719 cpu_abort(cs, "Invalid AIL combination %d\n", ail);
720 break;
721 }
722 }
723
724 #if defined(TARGET_PPC64)
725 if (excp_model == POWERPC_EXCP_BOOKE) {
726 if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
727 /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
728 new_msr |= (target_ulong)1 << MSR_CM;
729 } else {
730 vector = (uint32_t)vector;
731 }
732 } else {
733 if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) {
734 vector = (uint32_t)vector;
735 } else {
736 new_msr |= (target_ulong)1 << MSR_SF;
737 }
738 }
739 #endif
740 /* We don't use hreg_store_msr here as we have already handled
741 * any special case that could occur. Just store MSR and update hflags
742 *
743 * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
744 * will prevent setting of the HV bit which some exceptions might need
745 * to do.
746 */
747 env->msr = new_msr & env->msr_mask;
748 hreg_compute_hflags(env);
749 env->nip = vector;
750 /* Reset exception state */
751 cs->exception_index = POWERPC_EXCP_NONE;
752 env->error_code = 0;
753
754 /* Reset the reservation */
755 env->reserve_addr = -1;
756
757 /* Any interrupt is context synchronizing; check if the TCG TLB
758 * needs a delayed flush on ppc64
759 */
760 check_tlb_flush(env, false);
761 }
762
763 void ppc_cpu_do_interrupt(CPUState *cs)
764 {
765 PowerPCCPU *cpu = POWERPC_CPU(cs);
766 CPUPPCState *env = &cpu->env;
767
768 powerpc_excp(cpu, env->excp_model, cs->exception_index);
769 }
770
771 static void ppc_hw_interrupt(CPUPPCState *env)
772 {
773 PowerPCCPU *cpu = ppc_env_get_cpu(env);
774 bool async_deliver;
775
776 /* External reset */
777 if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
778 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
779 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
780 return;
781 }
782 /* Machine check exception */
783 if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
784 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
785 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
786 return;
787 }
788 #if 0 /* TODO */
789 /* External debug exception */
790 if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
791 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
792 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
793 return;
794 }
795 #endif
796
797 /*
798 * For interrupts that gate on MSR:EE, we need to do something a
799 * bit more subtle, as we need to let them through even when EE is
800 * clear when coming out of some power management states (in order
801 * for them to become a 0x100).
802 */
803 async_deliver = (msr_ee != 0) || env->resume_as_sreset;
804
805 /* Hypervisor decrementer exception */
806 if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
807 /* LPCR will be clear when not supported so this will work */
808 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
809 if ((async_deliver || msr_hv == 0) && hdice) {
810 /* HDEC clears on delivery */
811 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
812 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
813 return;
814 }
815 }
816
817 /* Hypervisor virtualization interrupt */
818 if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
819 /* LPCR will be clear when not supported so this will work */
820 bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
821 if ((async_deliver || msr_hv == 0) && hvice) {
822 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT);
823 return;
824 }
825 }
826
827 /* External interrupt can ignore MSR:EE under some circumstances */
828 if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
829 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
830 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
831 /* HEIC blocks delivery to the hypervisor */
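/* With LPES0 clear the interrupt targets the hypervisor, so it is
 * deliverable even with MSR:EE clear when not already in HV mode */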
832 if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
833 (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
834 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
835 return;
836 }
837 }
838 if (msr_ce != 0) {
839 /* External critical interrupt */
840 if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
841 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
842 return;
843 }
844 }
845 if (async_deliver != 0) {
846 /* Watchdog timer on embedded PowerPC */
847 if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
848 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
849 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
850 return;
851 }
852 if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
853 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
854 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
855 return;
856 }
857 /* Fixed interval timer on embedded PowerPC */
858 if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
859 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
860 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
861 return;
862 }
863 /* Programmable interval timer on embedded PowerPC */
864 if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
865 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
866 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
867 return;
868 }
869 /* Decrementer exception */
870 if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
871 if (ppc_decr_clear_on_delivery(env)) {
872 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
873 }
874 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
875 return;
876 }
877 if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
878 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
879 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
880 return;
881 }
882 if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
883 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
884 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV);
885 return;
886 }
887 if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
888 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
889 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
890 return;
891 }
892 /* Thermal interrupt */
893 if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
894 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
895 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
896 return;
897 }
898 }
899
900 if (env->resume_as_sreset) {
901 /*
902 * This is a bug! It means that has_work took us out of halt without
903 * anything to deliver while in a PM state that requires getting
904 * out via a 0x100.
905 *
906 * This means we will incorrectly execute past the power management
907 * instruction instead of triggering a reset.
908 *
909 * It generally means a discrepancy between the wakeup conditions in the
910 * processor's has_work implementation and the logic in this function.
911 */
912 cpu_abort(CPU(ppc_env_get_cpu(env)),
913 "Wakeup from PM state but interrupt Undelivered");
914 }
915 }
916
917 void ppc_cpu_do_system_reset(CPUState *cs)
918 {
919 PowerPCCPU *cpu = POWERPC_CPU(cs);
920 CPUPPCState *env = &cpu->env;
921
922 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
923 }
924 #endif /* !CONFIG_USER_ONLY */
925
926 bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
927 {
928 PowerPCCPU *cpu = POWERPC_CPU(cs);
929 CPUPPCState *env = &cpu->env;
930
931 if (interrupt_request & CPU_INTERRUPT_HARD) {
932 ppc_hw_interrupt(env);
933 if (env->pending_interrupts == 0) {
934 cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
935 }
936 return true;
937 }
938 return false;
939 }
940
941 #if defined(DEBUG_OP)
942 static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
943 {
944 qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
945 TARGET_FMT_lx "\n", RA, msr);
946 }
947 #endif
948
949 /*****************************************************************************/
950 /* Exceptions processing helpers */
951
952 void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
953 uint32_t error_code, uintptr_t raddr)
954 {
955 CPUState *cs = CPU(ppc_env_get_cpu(env));
956
957 cs->exception_index = exception;
958 env->error_code = error_code;
959 cpu_loop_exit_restore(cs, raddr);
960 }
961
962 void raise_exception_err(CPUPPCState *env, uint32_t exception,
963 uint32_t error_code)
964 {
965 raise_exception_err_ra(env, exception, error_code, 0);
966 }
967
968 void raise_exception(CPUPPCState *env, uint32_t exception)
969 {
970 raise_exception_err_ra(env, exception, 0, 0);
971 }
972
973 void raise_exception_ra(CPUPPCState *env, uint32_t exception,
974 uintptr_t raddr)
975 {
976 raise_exception_err_ra(env, exception, 0, raddr);
977 }
978
979 void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
980 uint32_t error_code)
981 {
982 raise_exception_err_ra(env, exception, error_code, 0);
983 }
984
985 void helper_raise_exception(CPUPPCState *env, uint32_t exception)
986 {
987 raise_exception_err_ra(env, exception, 0, 0);
988 }
989
990 #if !defined(CONFIG_USER_ONLY)
991 void helper_store_msr(CPUPPCState *env, target_ulong val)
992 {
993 uint32_t excp = hreg_store_msr(env, val, 0);
994
995 if (excp != 0) {
996 CPUState *cs = CPU(ppc_env_get_cpu(env));
997 cpu_interrupt_exittb(cs);
998 raise_exception(env, excp);
999 }
1000 }
1001
1002 #if defined(TARGET_PPC64)
1003 void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
1004 {
1005 CPUState *cs;
1006
1007 cs = CPU(ppc_env_get_cpu(env));
1008 cs->halted = 1;
1009
1010 /* The architecture specifies that HDEC interrupts are
1011 * discarded in PM states
1012 */
1013 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
1014
1015 /* Condition for waking up at 0x100 */
1016 env->resume_as_sreset = (insn != PPC_PM_STOP) ||
1017 (env->spr[SPR_PSSCR] & PSSCR_EC);
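/*
 * i.e. doze/nap/sleep/winkle always resume via a 0x100 system reset,
 * while stop only does so when PSSCR[EC] is set; otherwise stop
 * resumes at the next instruction.
 */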
1018 }
1019 #endif /* defined(TARGET_PPC64) */
1020
1021 static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
1022 {
1023 CPUState *cs = CPU(ppc_env_get_cpu(env));
1024
1025 /* MSR:POW cannot be set by any form of rfi */
1026 msr &= ~(1ULL << MSR_POW);
1027
1028 #if defined(TARGET_PPC64)
1029 /* Switching to 32-bit? Crop the nip */
1030 if (!msr_is_64bit(env, msr)) {
1031 nip = (uint32_t)nip;
1032 }
1033 #else
1034 nip = (uint32_t)nip;
1035 #endif
1036 /* XXX: beware: this is false if VLE is supported */
1037 env->nip = nip & ~((target_ulong)0x00000003);
1038 hreg_store_msr(env, msr, 1);
1039 #if defined(DEBUG_OP)
1040 cpu_dump_rfi(env->nip, env->msr);
1041 #endif
1042 /* No need to raise an exception here,
1043 * as rfi is always the last insn of a TB
1044 */
1045 cpu_interrupt_exittb(cs);
1046 /* Reset the reservation */
1047 env->reserve_addr = -1;
1048
1049 /* Context synchronizing: check if TCG TLB needs flush */
1050 check_tlb_flush(env, false);
1051 }
1052
1053 void helper_rfi(CPUPPCState *env)
1054 {
1055 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
1056 }
1057
1059 #if defined(TARGET_PPC64)
1060 void helper_rfid(CPUPPCState *env)
1061 {
1062 /* The architecture defines a number of rules for which bits
1063 * can change but in practice, we handle this in hreg_store_msr()
1064 * which will be called by do_rfi(), so there is no need to filter
1065 * here
1066 */
1067 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
1068 }
1069
1070 void helper_hrfid(CPUPPCState *env)
1071 {
1072 do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
1073 }
1074 #endif
1075
1076 /*****************************************************************************/
1077 /* Embedded PowerPC specific helpers */
1078 void helper_40x_rfci(CPUPPCState *env)
1079 {
1080 do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
1081 }
1082
1083 void helper_rfci(CPUPPCState *env)
1084 {
1085 do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
1086 }
1087
1088 void helper_rfdi(CPUPPCState *env)
1089 {
1090 /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
1091 do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
1092 }
1093
1094 void helper_rfmci(CPUPPCState *env)
1095 {
1096 /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
1097 do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
1098 }
1099 #endif
1100
1101 void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
1102 uint32_t flags)
1103 {
1104 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1105 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1106 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1107 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1108 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1109 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
1110 POWERPC_EXCP_TRAP, GETPC());
1111 }
1112 }
1113
1114 #if defined(TARGET_PPC64)
1115 void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
1116 uint32_t flags)
1117 {
1118 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1119 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1120 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1121 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1122 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
1123 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
1124 POWERPC_EXCP_TRAP, GETPC());
1125 }
1126 }
1127 #endif
1128
1129 #if !defined(CONFIG_USER_ONLY)
1130 /*****************************************************************************/
1131 /* PowerPC 601 specific instructions (POWER bridge) */
1132
1133 void helper_rfsvc(CPUPPCState *env)
1134 {
1135 do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
1136 }
1137
1138 /* Embedded.Processor Control */
1139 static int dbell2irq(target_ulong rb)
1140 {
1141 int msg = rb & DBELL_TYPE_MASK;
1142 int irq = -1;
1143
1144 switch (msg) {
1145 case DBELL_TYPE_DBELL:
1146 irq = PPC_INTERRUPT_DOORBELL;
1147 break;
1148 case DBELL_TYPE_DBELL_CRIT:
1149 irq = PPC_INTERRUPT_CDOORBELL;
1150 break;
1151 case DBELL_TYPE_G_DBELL:
1152 case DBELL_TYPE_G_DBELL_CRIT:
1153 case DBELL_TYPE_G_DBELL_MC:
1154 /* XXX implement */
1155 default:
1156 break;
1157 }
1158
1159 return irq;
1160 }
1161
1162 void helper_msgclr(CPUPPCState *env, target_ulong rb)
1163 {
1164 int irq = dbell2irq(rb);
1165
1166 if (irq < 0) {
1167 return;
1168 }
1169
1170 env->pending_interrupts &= ~(1 << irq);
1171 }
1172
1173 void helper_msgsnd(target_ulong rb)
1174 {
1175 int irq = dbell2irq(rb);
1176 int pir = rb & DBELL_PIRTAG_MASK;
1177 CPUState *cs;
1178
1179 if (irq < 0) {
1180 return;
1181 }
1182
1183 qemu_mutex_lock_iothread();
1184 CPU_FOREACH(cs) {
1185 PowerPCCPU *cpu = POWERPC_CPU(cs);
1186 CPUPPCState *cenv = &cpu->env;
1187
1188 if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
1189 cenv->pending_interrupts |= 1 << irq;
1190 cpu_interrupt(cs, CPU_INTERRUPT_HARD);
1191 }
1192 }
1193 qemu_mutex_unlock_iothread();
1194 }
1195
1196 /* Server Processor Control */
1197 static int book3s_dbell2irq(target_ulong rb)
1198 {
1199 int msg = rb & DBELL_TYPE_MASK;
1200
1201 /* A Directed Hypervisor Doorbell message is sent only if the
1202 * message type is 5. All other types are reserved and the
1203 * instruction is a no-op */
1204 return msg == DBELL_TYPE_DBELL_SERVER ? PPC_INTERRUPT_HDOORBELL : -1;
1205 }
1206
1207 void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
1208 {
1209 int irq = book3s_dbell2irq(rb);
1210
1211 if (irq < 0) {
1212 return;
1213 }
1214
1215 env->pending_interrupts &= ~(1 << irq);
1216 }
1217
1218 void helper_book3s_msgsnd(target_ulong rb)
1219 {
1220 int irq = book3s_dbell2irq(rb);
1221 int pir = rb & DBELL_PROCIDTAG_MASK;
1222 CPUState *cs;
1223
1224 if (irq < 0) {
1225 return;
1226 }
1227
1228 qemu_mutex_lock_iothread();
1229 CPU_FOREACH(cs) {
1230 PowerPCCPU *cpu = POWERPC_CPU(cs);
1231 CPUPPCState *cenv = &cpu->env;
1232
1233 /* TODO: broadcast message to all threads of the same processor */
1234 if (cenv->spr_cb[SPR_PIR].default_value == pir) {
1235 cenv->pending_interrupts |= 1 << irq;
1236 cpu_interrupt(cs, CPU_INTERRUPT_HARD);
1237 }
1238 }
1239 qemu_mutex_unlock_iothread();
1240 }
1241 #endif
1242
1243 void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
1244 MMUAccessType access_type,
1245 int mmu_idx, uintptr_t retaddr)
1246 {
1247 CPUPPCState *env = cs->env_ptr;
1248 uint32_t insn;
1249
1250 /* Restore state and reload the insn we executed, for filling in DSISR. */
1251 cpu_restore_state(cs, retaddr, true);
1252 insn = cpu_ldl_code(env, env->nip);
1253
1254 cs->exception_index = POWERPC_EXCP_ALIGN;
1255 env->error_code = insn & 0x03FF0000;
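/* powerpc_excp() shifts this into DSISR when delivering POWERPC_EXCP_ALIGN */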
1256 cpu_loop_exit(cs);
1257 }