/*
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>

#include <asm/kdebug.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#ifdef CONFIG_PPC32
#include <asm/reg.h>
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#endif
#include <asm/kexec.h>

#ifdef CONFIG_DEBUGGER
int (*__debugger)(struct pt_regs *regs);
int (*__debugger_ipi)(struct pt_regs *regs);
int (*__debugger_bpt)(struct pt_regs *regs);
int (*__debugger_sstep)(struct pt_regs *regs);
int (*__debugger_iabr_match)(struct pt_regs *regs);
int (*__debugger_dabr_match)(struct pt_regs *regs);
int (*__debugger_fault_handler)(struct pt_regs *regs);

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_dabr_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

ATOMIC_NOTIFIER_HEAD(powerpc_die_chain);

int register_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&powerpc_die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier);

int unregister_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&powerpc_die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier);

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

int die(const char *str, struct pt_regs *regs, long err)
{
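	/*
	 * Serialize oops output: one CPU at a time owns the lock, and
	 * lock_owner_depth detects recursive die() calls on the same CPU
	 * so that repeated nested output can be suppressed.
	 */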
	static struct {
		spinlock_t lock;
		u32 lock_owner;
		int lock_owner_depth;
	} die = {
		.lock = __SPIN_LOCK_UNLOCKED(die.lock),
		.lock_owner = -1,
		.lock_owner_depth = 0
	};
	static int die_counter;
	unsigned long flags;

	if (debugger(regs))
		return 1;

	oops_enter();

	if (die.lock_owner != raw_smp_processor_id()) {
		console_verbose();
		spin_lock_irqsave(&die.lock, flags);
		die.lock_owner = smp_processor_id();
		die.lock_owner_depth = 0;
		bust_spinlocks(1);
		if (machine_is(powermac))
			pmac_backlight_unblank();
	} else {
		local_save_flags(flags);
	}

	if (++die.lock_owner_depth < 3) {
		printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
		printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
		printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
		printk("DEBUG_PAGEALLOC ");
#endif
#ifdef CONFIG_NUMA
		printk("NUMA ");
#endif
		printk("%s\n", ppc_md.name ? ppc_md.name : "");

		print_modules();
		show_regs(regs);
	} else {
		printk("Recursive die() failure, output suppressed\n");
	}

	bust_spinlocks(0);
	die.lock_owner = -1;
	spin_unlock_irqrestore(&die.lock, flags);

	if (kexec_should_crash(current) ||
		kexec_sr_activated(smp_processor_id()))
		crash_kexec(regs);
	crash_kexec_secondary(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(err);

	return 0;
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;

	if (!user_mode(regs)) {
		if (die("Exception in kernel mode", regs, signr))
			return;
	}

	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);

	/*
	 * Init gets no signals that it doesn't have a handler for.
	 * That's all very well, but if it has caused a synchronous
	 * exception and we ignore the resulting signal, it will just
	 * generate the same exception over and over again and we get
	 * nowhere.  Better to kill it and let the kernel panic.
	 */
	if (is_init(current)) {
		__sighandler_t handler;

		spin_lock_irq(&current->sighand->siglock);
		handler = current->sighand->action[signr-1].sa.sa_handler;
		spin_unlock_irq(&current->sighand->siglock);
		if (handler == SIG_DFL) {
			/* init has generated a synchronous exception
			   and it doesn't have a handler for the signal */
			printk(KERN_CRIT "init has generated signal %d "
			       "but has no handler for it\n", signr);
			do_exit(signr);
		}
	}
}

#ifdef CONFIG_PPC64
void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			return;
	}

#ifdef CONFIG_KEXEC
	cpu_set(smp_processor_id(), cpus_in_sr);
#endif

	die("System Reset", regs, SIGABRT);

	/*
	 * Some CPUs when released from the debugger will execute this path.
	 * These CPUs entered the debugger via a soft-reset. If the CPU was
	 * hung before entering the debugger it will return to the hung
	 * state when exiting this function.  This causes a problem in
	 * kdump since the hung CPU(s) will not respond to the IPI sent
	 * from kdump. To prevent the problem we call crash_kexec_secondary()
	 * here. If a kdump had not been initiated or we exit the debugger
	 * with the "exit and recover" command (x) crash_kexec_secondary()
	 * will return after 5ms and the CPU returns to its previous state.
	 */
	crash_kexec_secondary(regs);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}
#endif

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif

/*
 * This is the "fall-back" implementation for configurations
 * which don't provide platform-specific machine check info.
 */
void __attribute__ ((weak))
platform_machine_check(struct pt_regs *regs)
{
}

void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;
	unsigned long reason = get_mc_reason(regs);

	/* See if any machine dependent calls */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);

	if (recover)
		return;

	if (user_mode(regs)) {
		regs->msr |= MSR_RI;
		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
		return;
	}

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/* the qspan pci read routines can cause machine checks -- Cort */
	bad_page_fault(regs, regs->dar, SIGBUS);
	return;
#endif

	if (debugger_fault_handler(regs)) {
		regs->msr |= MSR_RI;
		return;
	}

	if (check_io_access(regs))
		return;

#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");
#elif defined(CONFIG_440A)
	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP){
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	}
	else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP){
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
#elif defined (CONFIG_E500)
	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_GL_CI)
		printk("Guarded Load or Cache-Inhibited stwcx.\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		printk("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		printk("Bus - Read Parity Error\n");
#elif defined (CONFIG_E200)
	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		printk("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		printk("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		printk("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		printk("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		printk("Bus - Write Bus Error on buffered store or cache line push\n");
#else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
#endif /* CONFIG_4xx */

	/*
	 * Optional platform-provided routine to print out
	 * additional info, e.g. bus error registers.
	 */
	platform_machine_check(regs);

	if (debugger_fault_handler(regs))
		return;
	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

void unknown_exception(struct pt_regs *regs)
{
	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_iabr_match(regs))
		return;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

void __kprobes single_step_exception(struct pt_regs *regs)
{
	regs->msr &= ~(MSR_SE | MSR_BE);	/* Turn off 'trace' bits */

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_sstep(regs))
		return;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
}

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs)) {
		clear_single_step(regs);
		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
	}
}

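/*
 * Translate the FPSCR exception state into a SIGFPE si_code.  An
 * exception is reported only when both its enable bit (e.g. FPSCR_VE)
 * and its corresponding status bit (e.g. FPSCR_VX) are set.
 */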
static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fpscr.val);

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 *
 */
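/*
 * Each emulated instruction below is recognized by masking off the
 * fields that vary between encodings (registers, SPR numbers, etc.)
 * and comparing what is left against the expected opcode pattern,
 * e.g. (instword & INST_DCBA_MASK) == INST_DCBA.
 */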
#define INST_MFSPR_PVR		0x7c1f42a6
#define INST_MFSPR_PVR_MASK	0xfc1fffff

#define INST_DCBA		0x7c0005ec
#define INST_DCBA_MASK		0xfc0007fe

#define INST_MCRXR		0x7c000400
#define INST_MCRXR_MASK		0xfc0007fe

#define INST_STRING		0x7c00042a
#define INST_STRING_MASK	0xfc0007fe
#define INST_STRING_GEN_MASK	0xfc00067e
#define INST_LSWI		0x7c0004aa
#define INST_LSWX		0x7c00042a
#define INST_STSWI		0x7c0005aa
#define INST_STSWX		0x7c00052a

#define INST_POPCNTB		0x7c0000f4
#define INST_POPCNTB_MASK	0xfc0007fe

static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & INST_STRING_MASK) == INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & INST_STRING_MASK) {
		case INST_LSWX:
		case INST_STSWX:
			EA += NB_RB;
			num_bytes = regs->xer & 0x7f;
			break;
		case INST_LSWI:
		case INST_STSWI:
			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
			break;
		default:
			return -EINVAL;
	}

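	/*
	 * Transfer one byte at a time between memory at EA and the
	 * register file: four bytes are packed per register, most
	 * significant byte first, and the destination register wraps
	 * from r31 back to r0.
	 */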
	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		switch ((instword & INST_STRING_MASK)) {
			case INST_LSWX:
			case INST_LSWI:
				if (get_user(val, (u8 __user *)EA))
					return -EFAULT;
				/* first time updating this reg,
				 * zero it out */
				if (pos == 0)
					regs->gpr[rT] = 0;
				regs->gpr[rT] |= val << shift;
				break;
			case INST_STSWI:
			case INST_STSWX:
				val = regs->gpr[rT] >> shift;
				if (put_user(val, (u8 __user *)EA))
					return -EFAULT;
				break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}

static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
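	/*
	 * Classic SWAR population count, done per byte: form 2-bit pair
	 * counts, then 4-bit nibble counts, then per-byte totals, which
	 * is exactly what popcntb puts in each byte of rA.
	 */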
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}

static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs) || (regs->msr & MSR_LE))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op. */
	if ((instword & INST_DCBA_MASK) == INST_DCBA)
		return 0;

	/* Emulate the mcrxr insn. */
	if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
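		/*
		 * mcrxr crfD copies the XER SO/OV/CA bits (the top nibble
		 * of the XER) into CR field crfD and clears them in the
		 * XER; the shift selects the 4-bit CR field.
		 */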
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
		return emulate_string_inst(regs, instword);

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & INST_POPCNTB_MASK) == INST_POPCNTB) {
		return emulate_popcntb_inst(regs, instword);
	}

	return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

void __kprobes program_check_exception(struct pt_regs *regs)
{
	unsigned int reason = get_reason(regs);
	extern int do_mathemu(struct pt_regs *regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		return;
	}
	if (reason & REASON_TRAP) {
		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			return;
		if (debugger_bpt(regs))
			return;

		if (!(regs->msr & MSR_PR) &&	/* not user-mode */
		    report_bug(regs->nip) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			return;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		return;
	}

	local_irq_enable();

#ifdef CONFIG_MATH_EMULATION
	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
	switch (do_mathemu(regs)) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1: {
			int code = 0;
			code = __parse_fpscr(current->thread.fpscr.val);
			_exception(SIGFPE, regs, code, regs->nip);
			return;
		}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	}
	/* fall through on any other errors */
#endif /* CONFIG_MATH_EMULATION */

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			return;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			return;
		}
	}

	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}

void alignment_exception(struct pt_regs *regs)
{
	int sig, code, fixed = 0;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		return;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);
}

void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

void trace_syscall(struct pt_regs *regs)
{
	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
	       current, current->pid, regs->nip, regs->link, regs->gpr[0],
	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
	       "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
	       "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
}

void performance_monitor_exception(struct pt_regs *regs)
{
	perf_irq(regs);
}

#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
{
	extern int do_mathemu(struct pt_regs *);
	extern int Soft_emulate_8xx(struct pt_regs *);
	int errcode;

	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

#ifdef CONFIG_MATH_EMULATION
	errcode = do_mathemu(regs);

	switch (errcode) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1: {
			int code = 0;
			code = __parse_fpscr(current->thread.fpscr.val);
			_exception(SIGFPE, regs, code, regs->nip);
			return;
		}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	default:
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

#else
	errcode = Soft_emulate_8xx(regs);
	switch (errcode) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1:
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	}
#endif
}
#endif /* CONFIG_8xx */

#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)

void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	if (debug_status & DBSR_IC) {	/* instruction completion */
		regs->msr &= ~MSR_DE;
		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_IC;
		} else {
			/* Disable instruction completion */
			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
			/* Clear the instruction completion event */
			mtspr(SPRN_DBSR, DBSR_IC);
			if (debugger_sstep(regs))
				return;
		}
		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
	}
}
#endif /* CONFIG_40x || CONFIG_BOOKE */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* !CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		if (printk_ratelimit())
			printk(KERN_ERR "Unrecognized altivec instruction "
			       "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	/* Hardware does not necessarily set sticky
	 * underflow/overflow/invalid flags */
	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
		spefscr |= SPEFSCR_FOVFS;
	}
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
		spefscr |= SPEFSCR_FUNFS;
	}
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
		spefscr |= SPEFSCR_FINVS;
	}
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	current->thread.spefscr = spefscr;

	_exception(SIGFPE, regs, code, regs->nip);
	return;
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}

#ifdef CONFIG_BOOKE_WDT
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}