]> git.proxmox.com Git - mirror_qemu.git/blob - target/ppc/excp_helper.c
iotests: Let 245 pass on tmpfs
[mirror_qemu.git] / target / ppc / excp_helper.c
1 /*
2 * PowerPC exception emulation helpers for QEMU.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/main-loop.h"
21 #include "cpu.h"
22 #include "exec/helper-proto.h"
23 #include "exec/exec-all.h"
24 #include "exec/cpu_ldst.h"
25 #include "internal.h"
26 #include "helper_regs.h"
27
28 //#define DEBUG_OP
29 //#define DEBUG_SOFTWARE_TLB
30 //#define DEBUG_EXCEPTIONS
31
32 #ifdef DEBUG_EXCEPTIONS
33 # define LOG_EXCP(...) qemu_log(__VA_ARGS__)
34 #else
35 # define LOG_EXCP(...) do { } while (0)
36 #endif
37
38 /*****************************************************************************/
39 /* Exception processing */
40 #if defined(CONFIG_USER_ONLY)
41 void ppc_cpu_do_interrupt(CPUState *cs)
42 {
43 PowerPCCPU *cpu = POWERPC_CPU(cs);
44 CPUPPCState *env = &cpu->env;
45
46 cs->exception_index = POWERPC_EXCP_NONE;
47 env->error_code = 0;
48 }
49
50 static void ppc_hw_interrupt(CPUPPCState *env)
51 {
52 CPUState *cs = CPU(ppc_env_get_cpu(env));
53
54 cs->exception_index = POWERPC_EXCP_NONE;
55 env->error_code = 0;
56 }
57 #else /* defined(CONFIG_USER_ONLY) */
/*
 * Trace a system call: log the syscall number (r0), the first argument
 * registers (r3..r6) and the NIP under the CPU_LOG_INT log mask.
 */
static inline void dump_syscall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 " r3=%016" PRIx64
                  " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
                  ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
                  ppc_dump_gpr(env, 6), env->nip);
}
67
/*
 * Fix up *msr and possibly convert the exception type when waking a CPU
 * from a power-management state.  Machine checks are passed through
 * unchanged; every other wake cause is converted into a 0x100 system
 * reset with the wake reason encoded into the SRR1 image in *msr.
 * Returns the exception to actually deliver.
 */
static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
                                target_ulong *msr)
{
    /* We no longer are in a PM state */
    env->resume_as_sreset = false;

    /* Pretend to be returning from doze always as we don't lose state */
    *msr |= (0x1ull << (63 - 47));

    /* Machine checks are sent normally */
    if (excp == POWERPC_EXCP_MCHECK) {
        return excp;
    }
    /* Encode the wake reason into the SRR1 field at PPC bits 42:45 */
    switch (excp) {
    case POWERPC_EXCP_RESET:
        *msr |= 0x4ull << (63 - 45);
        break;
    case POWERPC_EXCP_EXTERNAL:
        *msr |= 0x8ull << (63 - 45);
        break;
    case POWERPC_EXCP_DECR:
        *msr |= 0x6ull << (63 - 45);
        break;
    case POWERPC_EXCP_SDOOR:
        *msr |= 0x5ull << (63 - 45);
        break;
    case POWERPC_EXCP_SDOOR_HV:
        *msr |= 0x3ull << (63 - 45);
        break;
    case POWERPC_EXCP_HV_MAINT:
        *msr |= 0xaull << (63 - 45);
        break;
    case POWERPC_EXCP_HVIRT:
        *msr |= 0x9ull << (63 - 45);
        break;
    default:
        cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
                  excp);
    }
    return POWERPC_EXCP_RESET;
}
109
110 static uint64_t ppc_excp_vector_offset(CPUState *cs, int ail)
111 {
112 uint64_t offset = 0;
113
114 switch (ail) {
115 case AIL_0001_8000:
116 offset = 0x18000;
117 break;
118 case AIL_C000_0000_0000_4000:
119 offset = 0xc000000000004000ull;
120 break;
121 default:
122 cpu_abort(cs, "Invalid AIL combination %d\n", ail);
123 break;
124 }
125
126 return offset;
127 }
128
/* Note that this function should be greatly optimized
 * when called with a constant excp, from ppc_hw_interrupt
 */
/*
 * Deliver exception 'excp' to the CPU: compute the SRR0/SRR1 (or
 * alternate register) images, the new MSR, and the vector address,
 * then update the architectural state to enter the handler.
 */
static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    target_ulong msr, new_msr, vector;
    int srr0, srr1, asrr0, asrr1, lev, ail;
    bool lpes0;

    qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
                  " => %08x (%02x)\n", env->nip, excp, env->error_code);

    /* new srr1 value excluding must-be-zero bits */
    if (excp_model == POWERPC_EXCP_BOOKE) {
        msr = env->msr;
    } else {
        msr = env->msr & ~0x783f0000ULL;
    }

    /* new interrupt handler msr preserves existing HV and ME unless
     * explicitly overriden
     */
    new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);

    /* target registers */
    srr0 = SPR_SRR0;
    srr1 = SPR_SRR1;
    asrr0 = -1;   /* -1 means "no alternate save register" */
    asrr1 = -1;

    /*
     * check for special resume at 0x100 from doze/nap/sleep/winkle on
     * P7/P8/P9
     */
    if (env->resume_as_sreset) {
        excp = powerpc_reset_wakeup(cs, env, excp, &msr);
    }

    /* Exception targetting modifiers
     *
     * LPES0 is supported on POWER7/8/9
     * LPES1 is not supported (old iSeries mode)
     *
     * On anything else, we behave as if LPES0 is 1
     * (externals don't alter MSR:HV)
     *
     * AIL is initialized here but can be cleared by
     * selected exceptions
     */
#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_POWER7 ||
        excp_model == POWERPC_EXCP_POWER8 ||
        excp_model == POWERPC_EXCP_POWER9) {
        lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        if (excp_model != POWERPC_EXCP_POWER7) {
            ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        } else {
            /* POWER7 has no AIL */
            ail = 0;
        }
    } else
#endif /* defined(TARGET_PPC64) */
    {
        lpes0 = true;
        ail = 0;
    }

    /* Hypervisor emulation assistance interrupt only exists on server
     * arch 2.05 server or later. We also don't want to generate it if
     * we don't have HVB in msr_mask (PAPR mode).
     */
    if (excp == POWERPC_EXCP_HV_EMU
#if defined(TARGET_PPC64)
        && !((env->mmu_model & POWERPC_MMU_64) && (env->msr_mask & MSR_HVB))
#endif /* defined(TARGET_PPC64) */

       ) {
        excp = POWERPC_EXCP_PROGRAM;
    }

    /* Per-exception adjustments of the SRR1 image, new MSR and the
     * registers the context is saved into.
     */
    switch (excp) {
    case POWERPC_EXCP_NONE:
        /* Should never happen */
        return;
    case POWERPC_EXCP_CRITICAL:    /* Critical input                         */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        case POWERPC_EXCP_G2:
            break;
        default:
            goto excp_invalid;
        }
        break;
    case POWERPC_EXCP_MCHECK:    /* Machine check exception                  */
        if (msr_me == 0) {
            /* Machine check exception is not enabled.
             * Enter checkstop state.
             */
            fprintf(stderr, "Machine check while not allowed. "
                    "Entering checkstop state\n");
            if (qemu_log_separate()) {
                qemu_log("Machine check while not allowed. "
                        "Entering checkstop state\n");
            }
            cs->halted = 1;
            cpu_interrupt_exittb(cs);
        }
        if (env->msr_mask & MSR_HVB) {
            /* ISA specifies HV, but can be delivered to guest with HV clear
             * (e.g., see FWNMI in PAPR).
             */
            new_msr |= (target_ulong)MSR_HVB;
        }
        ail = 0;

        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);

        /* XXX: should also have something loaded in DAR / DSISR */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_MCSRR0;
            srr1 = SPR_BOOKE_MCSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DSI:       /* Data storage exception                   */
        LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx
                 "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:       /* Instruction storage exception            */
        LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
                 "\n", msr, env->nip);
        /* error_code carries the SRR1 status bits for the ISI cause */
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL:  /* External input                           */
        cs = CPU(cpu);

        if (!lpes0) {
            /* LPES0=0: externals are delivered to the hypervisor */
            new_msr |= (target_ulong)MSR_HVB;
            new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
            srr0 = SPR_HSRR0;
            srr1 = SPR_HSRR1;
        }
        if (env->mpic_proxy) {
            /* IACK the IRQ on delivery */
            env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
        }
        break;
    case POWERPC_EXCP_ALIGN:     /* Alignment exception                      */
        /* Get rS/rD and rA from faulting opcode */
        /* Note: the opcode fields will not be set properly for a direct
         * store load/store, but nobody cares as nobody actually uses
         * direct store segments.
         */
        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        break;
    case POWERPC_EXCP_PROGRAM:   /* Program exception                        */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
                /* FP exceptions are disabled: swallow the exception */
                LOG_EXCP("Ignore floating point exception\n");
                cs->exception_index = POWERPC_EXCP_NONE;
                env->error_code = 0;
                return;
            }

            /* FP exceptions always have NIP pointing to the faulting
             * instruction, so always use store_next and claim we are
             * precise in the MSR.
             */
            msr |= 0x00100000;
            env->spr[SPR_BOOKE_ESR] = ESR_FP;
            break;
        case POWERPC_EXCP_INVAL:
            LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
            msr |= 0x00080000;
            env->spr[SPR_BOOKE_ESR] = ESR_PIL;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            env->spr[SPR_BOOKE_ESR] = ESR_PPR;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            env->spr[SPR_BOOKE_ESR] = ESR_PTR;
            break;
        default:
            /* Should never occur */
            cpu_abort(cs, "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
    case POWERPC_EXCP_SYSCALL:   /* System call exception                    */
        dump_syscall(env);
        lev = env->error_code;

        /* We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /* "PAPR mode" built-in hypercall emulation */
        if ((lev == 1) && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            return;
        }
        if (lev == 1) {
            /* sc 1: hypervisor call, handler runs with HV set */
            new_msr |= (target_ulong)MSR_HVB;
        }
        break;
    case POWERPC_EXCP_FPU:       /* Floating-point unavailable exception     */
    case POWERPC_EXCP_APU:       /* Auxiliary processor unavailable          */
    case POWERPC_EXCP_DECR:      /* Decrementer exception                    */
        break;
    case POWERPC_EXCP_FIT:       /* Fixed-interval timer interrupt           */
        /* FIT on 4xx */
        LOG_EXCP("FIT exception\n");
        break;
    case POWERPC_EXCP_WDT:       /* Watchdog timer interrupt                 */
        LOG_EXCP("WDT exception\n");
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DTLB:      /* Data TLB error                           */
    case POWERPC_EXCP_ITLB:      /* Instruction TLB error                    */
        break;
    case POWERPC_EXCP_DEBUG:     /* Debug interrupt                          */
        if (env->flags & POWERPC_FLAG_DE) {
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_DSRR0;
            srr1 = SPR_BOOKE_DSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            /* DBSR already modified by caller */
        } else {
            cpu_abort(cs, "Debug exception triggered on unsupported model\n");
        }
        break;
    case POWERPC_EXCP_SPEU:      /* SPE/embedded floating-point unavailable  */
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPDI:     /* Embedded floating-point data interrupt   */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point data exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPRI:     /* Embedded floating-point round interrupt  */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point round exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EPERFM:    /* Embedded performance monitor interrupt   */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DOORI:     /* Embedded doorbell interrupt              */
        break;
    case POWERPC_EXCP_DOORCI:    /* Embedded doorbell critical interrupt     */
        srr0 = SPR_BOOKE_CSRR0;
        srr1 = SPR_BOOKE_CSRR1;
        break;
    case POWERPC_EXCP_RESET:     /* System reset exception                   */
        /* A power-saving exception sets ME, otherwise it is unchanged */
        if (msr_pow) {
            /* indicate that we resumed from power save mode */
            msr |= 0x10000;
            new_msr |= ((target_ulong)1 << MSR_ME);
        }
        if (env->msr_mask & MSR_HVB) {
            /* ISA specifies HV, but can be delivered to guest with HV clear
             * (e.g., see FWNMI in PAPR, NMI injection in QEMU).
             */
            new_msr |= (target_ulong)MSR_HVB;
        } else {
            if (msr_pow) {
                cpu_abort(cs, "Trying to deliver power-saving system reset "
                          "exception %d with no HV support\n", excp);
            }
        }
        ail = 0;
        break;
    case POWERPC_EXCP_DSEG:      /* Data segment exception                   */
    case POWERPC_EXCP_ISEG:      /* Instruction segment exception            */
    case POWERPC_EXCP_TRACE:     /* Trace exception                          */
        break;
    case POWERPC_EXCP_HDECR:     /* Hypervisor decrementer exception         */
    case POWERPC_EXCP_HDSI:      /* Hypervisor data storage exception        */
    case POWERPC_EXCP_HISI:      /* Hypervisor instruction storage exception */
    case POWERPC_EXCP_HDSEG:     /* Hypervisor data segment exception        */
    case POWERPC_EXCP_HISEG:     /* Hypervisor instruction segment exception */
    case POWERPC_EXCP_SDOOR_HV:  /* Hypervisor Doorbell interrupt            */
    case POWERPC_EXCP_HV_EMU:
    case POWERPC_EXCP_HVIRT:     /* Hypervisor virtualization                */
        /* HV exceptions save to HSRR0/1 and set HV in the new MSR */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_VPU:       /* Vector unavailable exception             */
    case POWERPC_EXCP_VSXU:       /* VSX unavailable exception               */
    case POWERPC_EXCP_FU:         /* Facility unavailable exception          */
#ifdef TARGET_PPC64
        /* error_code holds the FSCR facility code (FSCR:IC field) */
        env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
#endif
        break;
    case POWERPC_EXCP_PIT:       /* Programmable interval timer interrupt    */
        LOG_EXCP("PIT exception\n");
        break;
    case POWERPC_EXCP_IO:        /* IO error exception                       */
        /* XXX: TODO */
        cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_RUNM:      /* Run mode exception                       */
        /* XXX: TODO */
        cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_EMUL:      /* Emulation trap exception                 */
        /* XXX: TODO */
        cpu_abort(cs, "602 emulation trap exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IFTLB:     /* Instruction fetch TLB error              */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            goto tlb_miss_tgpr;
        case POWERPC_EXCP_7x5:
            goto tlb_miss;
        case POWERPC_EXCP_74xx:
            goto tlb_miss_74xx;
        default:
            cpu_abort(cs, "Invalid instruction TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_DLTLB:     /* Data load TLB miss                       */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            goto tlb_miss_tgpr;
        case POWERPC_EXCP_7x5:
            goto tlb_miss;
        case POWERPC_EXCP_74xx:
            goto tlb_miss_74xx;
        default:
            cpu_abort(cs, "Invalid data load TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_DSTLB:     /* Data store TLB miss                      */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
        tlb_miss_tgpr:
            /* Swap temporary saved registers with GPRs */
            if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
                new_msr |= (target_ulong)1 << MSR_TGPR;
                hreg_swap_gpr_tgpr(env);
            }
            goto tlb_miss;
        case POWERPC_EXCP_7x5:
        tlb_miss:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_IMISS];
                    cmp = &env->spr[SPR_ICMP];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_DMISS];
                    cmp = &env->spr[SPR_DCMP];
                }
                qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->spr[SPR_HASH1], env->spr[SPR_HASH2],
                         env->error_code);
            }
#endif
            msr |= env->crf[0] << 28;
            msr |= env->error_code; /* key, D/I, S/L bits */
            /* Set way using a LRU mechanism */
            msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
            break;
        case POWERPC_EXCP_74xx:
        tlb_miss_74xx:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                }
                qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->error_code);
            }
#endif
            msr |= env->error_code; /* key bit */
            break;
        default:
            cpu_abort(cs, "Invalid data store TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_FPA:       /* Floating-point assist exception          */
        /* XXX: TODO */
        cpu_abort(cs, "Floating point assist exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DABR:      /* Data address breakpoint                  */
        /* XXX: TODO */
        cpu_abort(cs, "DABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IABR:      /* Instruction address breakpoint           */
        /* XXX: TODO */
        cpu_abort(cs, "IABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SMI:       /* System management interrupt              */
        /* XXX: TODO */
        cpu_abort(cs, "SMI exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_THERM:     /* Thermal interrupt                        */
        /* XXX: TODO */
        cpu_abort(cs, "Thermal management exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_PERFM:     /* Embedded performance monitor interrupt   */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_VPUA:      /* Vector assist exception                  */
        /* XXX: TODO */
        cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SOFTP:     /* Soft patch exception                     */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 soft-patch exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MAINT:     /* Maintenance exception                    */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 maintenance exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint             */
        /* XXX: TODO */
        cpu_abort(cs, "Maskable external exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_NMEXTBR:   /* Non maskable external breakpoint         */
        /* XXX: TODO */
        cpu_abort(cs, "Non maskable external exception "
                  "is not implemented yet !\n");
        break;
    default:
    excp_invalid:
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
        break;
    }

    /* Save PC */
    env->spr[srr0] = env->nip;

    /* Save MSR */
    env->spr[srr1] = msr;

    /* Sanity check */
    if (!(env->msr_mask & MSR_HVB)) {
        if (new_msr & MSR_HVB) {
            cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
                      "no HV support\n", excp);
        }
        if (srr0 == SPR_HSRR0) {
            cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
                      "no HV support\n", excp);
        }
    }

    /* If any alternate SRR register are defined, duplicate saved values */
    if (asrr0 != -1) {
        env->spr[asrr0] = env->spr[srr0];
    }
    if (asrr1 != -1) {
        env->spr[asrr1] = env->spr[srr1];
    }

    /* Sort out endianness of interrupt, this differs depending on the
     * CPU, the HV mode, etc...
     */
#ifdef TARGET_PPC64
    if (excp_model == POWERPC_EXCP_POWER7) {
        if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER8) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER9) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#else
    if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#endif

    /* Jump to handler */
    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(cs, "Raised an exception without defined vector %d\n",
                  excp);
    }
    vector |= env->excp_prefix;

    /* AIL only works if there is no HV transition and we are running with
     * translations enabled
     */
    if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1) ||
        ((new_msr & MSR_HVB) && !(msr & MSR_HVB))) {
        ail = 0;
    }
    /* Handle AIL */
    if (ail) {
        new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
        vector |= ppc_excp_vector_offset(cs, ail);
    }

#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_BOOKE) {
        if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
            /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
            new_msr |= (target_ulong)1 << MSR_CM;
        } else {
            vector = (uint32_t)vector;
        }
    } else {
        if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) {
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_SF;
        }
    }
#endif
    /* We don't use hreg_store_msr here as already have treated
     * any special case that could occur. Just store MSR and update hflags
     *
     * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
     * will prevent setting of the HV bit which some exceptions might need
     * to do.
     */
    env->msr = new_msr & env->msr_mask;
    hreg_compute_hflags(env);
    env->nip = vector;
    /* Reset exception state */
    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;

    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Any interrupt is context synchronizing, check if TCG TLB
     * needs a delayed flush on ppc64
     */
    check_tlb_flush(env, false);
}
770
771 void ppc_cpu_do_interrupt(CPUState *cs)
772 {
773 PowerPCCPU *cpu = POWERPC_CPU(cs);
774 CPUPPCState *env = &cpu->env;
775
776 powerpc_excp(cpu, env->excp_model, cs->exception_index);
777 }
778
/*
 * Scan the pending hardware interrupt sources in priority order and
 * deliver the highest-priority deliverable one via powerpc_excp().
 * Sources whose pending bit clears on delivery are acknowledged here.
 */
static void ppc_hw_interrupt(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    bool async_deliver;

    /* External reset */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
        return;
    }
    /* Machine check exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
        return;
    }
#if 0 /* TODO */
    /* External debug exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
        return;
    }
#endif

    /*
     * For interrupts that gate on MSR:EE, we need to do something a
     * bit more subtle, as we need to let them through even when EE is
     * clear when coming out of some power management states (in order
     * for them to become a 0x100).
     */
    async_deliver = (msr_ee != 0) || env->resume_as_sreset;

    /* Hypervisor decrementer exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
        /* LPCR will be clear when not supported so this will work */
        bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
        if ((async_deliver || msr_hv == 0) && hdice) {
            /* HDEC clears on delivery */
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
            return;
        }
    }

    /* Hypervisor virtualization interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
        /* LPCR will be clear when not supported so this will work */
        bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
        if ((async_deliver || msr_hv == 0) && hvice) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT);
            return;
        }
    }

    /* External interrupt can ignore MSR:EE under some circumstances */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
        bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
        /* HEIC blocks delivery to the hypervisor */
        if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
            (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
            return;
        }
    }
    if (msr_ce != 0) {
        /* External critical interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
            return;
        }
    }
    if (async_deliver != 0) {
        /* Watchdog timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
            return;
        }
        /* Fixed interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
            return;
        }
        /* Programmable interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
            return;
        }
        /* Decrementer exception */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
            if (ppc_decr_clear_on_delivery(env)) {
                env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
            }
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
            return;
        }
        /* Thermal interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
            return;
        }
    }

    if (env->resume_as_sreset) {
        /*
         * This is a bug ! It means that has_work took us out of halt without
         * anything to deliver while in a PM state that requires getting
         * out via a 0x100
         *
         * This means we will incorrectly execute past the power management
         * instruction instead of triggering a reset.
         *
         * It generally means a discrepancy between the wakup conditions in the
         * processor has_work implementation and the logic in this function.
         */
        cpu_abort(CPU(ppc_env_get_cpu(env)),
                  "Wakeup from PM state but interrupt Undelivered");
    }
}
924
925 void ppc_cpu_do_system_reset(CPUState *cs)
926 {
927 PowerPCCPU *cpu = POWERPC_CPU(cs);
928 CPUPPCState *env = &cpu->env;
929
930 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
931 }
932 #endif /* !CONFIG_USER_ONLY */
933
934 bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
935 {
936 PowerPCCPU *cpu = POWERPC_CPU(cs);
937 CPUPPCState *env = &cpu->env;
938
939 if (interrupt_request & CPU_INTERRUPT_HARD) {
940 ppc_hw_interrupt(env);
941 if (env->pending_interrupts == 0) {
942 cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
943 }
944 return true;
945 }
946 return false;
947 }
948
#if defined(DEBUG_OP)
/* Debug-only: trace an rfi-style return (target address and new MSR) */
static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
{
    qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
             TARGET_FMT_lx "\n", RA, msr);
}
#endif
956
957 /*****************************************************************************/
958 /* Exceptions processing helpers */
959
/*
 * Record an exception in the CPU state and longjmp back to the main
 * execution loop.  When raddr is non-zero, guest state is first
 * restored from the host return address (for faults taken from within
 * translated code).  Does not return.
 */
void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}

/* As raise_exception_err_ra() with no unwinding (raddr == 0) */
void raise_exception_err(CPUPPCState *env, uint32_t exception,
                         uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

/* As raise_exception_err_ra() with no error code and no unwinding */
void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

/* As raise_exception_err_ra() with no error code */
void raise_exception_ra(CPUPPCState *env, uint32_t exception,
                        uintptr_t raddr)
{
    raise_exception_err_ra(env, exception, 0, raddr);
}

/* TCG helper: raise an exception with an error code, no unwinding */
void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

/* TCG helper: raise an exception with no error code, no unwinding */
void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}
997
998 #if !defined(CONFIG_USER_ONLY)
/*
 * TCG helper for mtmsr/mtmsrd: store the new MSR value.  If the store
 * demands an exception (non-zero return from hreg_store_msr), leave the
 * current TB and raise it.
 */
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        CPUState *cs = CPU(ppc_env_get_cpu(env));
        cpu_interrupt_exittb(cs);
        raise_exception(env, excp);
    }
}
1009
#if defined(TARGET_PPC64)
/*
 * TCG helper for power-management instructions (doze/nap/sleep/winkle/
 * stop): halt the CPU and arrange for wakeup behavior.
 */
void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
{
    CPUState *cs;

    cs = CPU(ppc_env_get_cpu(env));
    cs->halted = 1;

    /* The architecture specifies that HDEC interrupts are
     * discarded in PM states
     */
    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);

    /* Condition for waking up at 0x100 */
    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
        (env->spr[SPR_PSSCR] & PSSCR_EC);
}
#endif /* defined(TARGET_PPC64) */
1028
/*
 * Common return-from-interrupt implementation: restore NIP and MSR
 * from the supplied values, then resynchronize emulator state (TB
 * exit, reservation, TLB flush check).
 */
static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

#if defined(TARGET_PPC64)
    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined(DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    cpu_interrupt_exittb(cs);
    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}
1060
1061 void helper_rfi(CPUPPCState *env)
1062 {
1063 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
1064 }
1065
/*
 * Note: a stray empty "#define MSR_BOOK3S_MASK" used to sit here.  It
 * expanded to nothing, was referenced nowhere, and any expression use
 * of it would have been a syntax error, so it has been removed.
 */
#if defined(TARGET_PPC64)
/* rfid: 64-bit return from interrupt via SRR0/SRR1. */
void helper_rfid(CPUPPCState *env)
{
    /* The architecture defines a number of rules for which bits
     * can change but in practice, we handle this in hreg_store_msr()
     * which will be called by do_rfi(), so there is no need to filter
     * here
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}

/* hrfid: return from hypervisor interrupt via HSRR0/HSRR1. */
void helper_hrfid(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
#endif
1083
1084 /*****************************************************************************/
1085 /* Embedded PowerPC specific helpers */
1086 void helper_40x_rfci(CPUPPCState *env)
1087 {
1088 do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
1089 }
1090
1091 void helper_rfci(CPUPPCState *env)
1092 {
1093 do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
1094 }
1095
1096 void helper_rfdi(CPUPPCState *env)
1097 {
1098 /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
1099 do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
1100 }
1101
1102 void helper_rfmci(CPUPPCState *env)
1103 {
1104 /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
1105 do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
1106 }
1107 #endif
1108
1109 void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
1110 uint32_t flags)
1111 {
1112 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1113 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1114 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1115 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1116 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1117 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
1118 POWERPC_EXCP_TRAP, GETPC());
1119 }
1120 }
1121
1122 #if defined(TARGET_PPC64)
1123 void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
1124 uint32_t flags)
1125 {
1126 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1127 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1128 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1129 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1130 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
1131 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
1132 POWERPC_EXCP_TRAP, GETPC());
1133 }
1134 }
1135 #endif
1136
1137 #if !defined(CONFIG_USER_ONLY)
1138 /*****************************************************************************/
1139 /* PowerPC 601 specific instructions (POWER bridge) */
1140
1141 void helper_rfsvc(CPUPPCState *env)
1142 {
1143 do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
1144 }
1145
1146 /* Embedded.Processor Control */
1147 static int dbell2irq(target_ulong rb)
1148 {
1149 int msg = rb & DBELL_TYPE_MASK;
1150 int irq = -1;
1151
1152 switch (msg) {
1153 case DBELL_TYPE_DBELL:
1154 irq = PPC_INTERRUPT_DOORBELL;
1155 break;
1156 case DBELL_TYPE_DBELL_CRIT:
1157 irq = PPC_INTERRUPT_CDOORBELL;
1158 break;
1159 case DBELL_TYPE_G_DBELL:
1160 case DBELL_TYPE_G_DBELL_CRIT:
1161 case DBELL_TYPE_G_DBELL_MC:
1162 /* XXX implement */
1163 default:
1164 break;
1165 }
1166
1167 return irq;
1168 }
1169
1170 void helper_msgclr(CPUPPCState *env, target_ulong rb)
1171 {
1172 int irq = dbell2irq(rb);
1173
1174 if (irq < 0) {
1175 return;
1176 }
1177
1178 env->pending_interrupts &= ~(1 << irq);
1179 }
1180
1181 void helper_msgsnd(target_ulong rb)
1182 {
1183 int irq = dbell2irq(rb);
1184 int pir = rb & DBELL_PIRTAG_MASK;
1185 CPUState *cs;
1186
1187 if (irq < 0) {
1188 return;
1189 }
1190
1191 qemu_mutex_lock_iothread();
1192 CPU_FOREACH(cs) {
1193 PowerPCCPU *cpu = POWERPC_CPU(cs);
1194 CPUPPCState *cenv = &cpu->env;
1195
1196 if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
1197 cenv->pending_interrupts |= 1 << irq;
1198 cpu_interrupt(cs, CPU_INTERRUPT_HARD);
1199 }
1200 }
1201 qemu_mutex_unlock_iothread();
1202 }
1203
1204 /* Server Processor Control */
1205 static int book3s_dbell2irq(target_ulong rb)
1206 {
1207 int msg = rb & DBELL_TYPE_MASK;
1208
1209 /* A Directed Hypervisor Doorbell message is sent only if the
1210 * message type is 5. All other types are reserved and the
1211 * instruction is a no-op */
1212 return msg == DBELL_TYPE_DBELL_SERVER ? PPC_INTERRUPT_HDOORBELL : -1;
1213 }
1214
1215 void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
1216 {
1217 int irq = book3s_dbell2irq(rb);
1218
1219 if (irq < 0) {
1220 return;
1221 }
1222
1223 env->pending_interrupts &= ~(1 << irq);
1224 }
1225
1226 void helper_book3s_msgsnd(target_ulong rb)
1227 {
1228 int irq = book3s_dbell2irq(rb);
1229 int pir = rb & DBELL_PROCIDTAG_MASK;
1230 CPUState *cs;
1231
1232 if (irq < 0) {
1233 return;
1234 }
1235
1236 qemu_mutex_lock_iothread();
1237 CPU_FOREACH(cs) {
1238 PowerPCCPU *cpu = POWERPC_CPU(cs);
1239 CPUPPCState *cenv = &cpu->env;
1240
1241 /* TODO: broadcast message to all threads of the same processor */
1242 if (cenv->spr_cb[SPR_PIR].default_value == pir) {
1243 cenv->pending_interrupts |= 1 << irq;
1244 cpu_interrupt(cs, CPU_INTERRUPT_HARD);
1245 }
1246 }
1247 qemu_mutex_unlock_iothread();
1248 }
1249 #endif
1250
1251 void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
1252 MMUAccessType access_type,
1253 int mmu_idx, uintptr_t retaddr)
1254 {
1255 CPUPPCState *env = cs->env_ptr;
1256 uint32_t insn;
1257
1258 /* Restore state and reload the insn we executed, for filling in DSISR. */
1259 cpu_restore_state(cs, retaddr, true);
1260 insn = cpu_ldl_code(env, env->nip);
1261
1262 cs->exception_index = POWERPC_EXCP_ALIGN;
1263 env->error_code = insn & 0x03FF0000;
1264 cpu_loop_exit(cs);
1265 }