/*
 * Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 *
 * This file handles the architecture-dependent parts of hardware exceptions
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h>	/* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/debugfs.h>
#include <asm/machdep.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <sysdev/fsl_pci.h>
#include <asm/kprobes.h>
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif
/* Transactional Memory trap debug */
#if 0
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif
/*
 * Trap & Exception support
 */
#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif
/*
 * If oops/die is expected to crash the machine, return true here.
 *
 * This should not be expected to be 100% accurate, there may be
 * notifiers registered or other unexpected conditions that may bring
 * down the kernel. Or if the current process in the kernel is holding
 * locks or has other critical state, the kernel may become effectively
 * unusable anyway.
 */
bool die_will_crash(void)
{
	if (should_fadump_crash())
		return true;
	if (kexec_should_crash(current))
		return true;
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current))
		return true;

	return false;
}
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;
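
/*
 * Oops output is serialised by die_lock. die_owner records which CPU
 * holds the lock so that a recursive oops on the same CPU (counted in
 * die_nest_count) doesn't deadlock retaking it in oops_begin(), while
 * other CPUs simply queue behind the lock.
 */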
static unsigned long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);
static void oops_end(unsigned long flags, struct pt_regs *regs,
		     int signr)
{
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count) {
		/* Nest count reaches zero, release the lock. */
		die_owner = -1;
		arch_spin_unlock(&die_lock);
	}
	raw_local_irq_restore(flags);

	crash_fadump(regs, "die oops");

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);
static int __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);

	if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
		printk("LE ");
	else
		printk("BE ");

	if (IS_ENABLED(CONFIG_PREEMPT))
		pr_cont("PREEMPT ");

	if (IS_ENABLED(CONFIG_SMP))
		pr_cont("SMP NR_CPUS=%d ", NR_CPUS);

	if (debug_pagealloc_enabled())
		pr_cont("DEBUG_PAGEALLOC ");

	if (IS_ENABLED(CONFIG_NUMA))
		pr_cont("NUMA ");

	pr_cont("%s\n", ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}
NOKPROBE_SYMBOL(__die);
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags;

	/*
	 * system_reset_exception() handles debugger, crash dump,
	 * panic, for 0x100
	 */
	if (TRAP(regs) != 0x100) {
		if (debugger(regs))
			return;
	}

	flags = oops_begin(regs);
	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}
NOKPROBE_SYMBOL(die);
void user_single_step_siginfo(struct task_struct *tsk,
			      struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = TRAP_TRACE;
	info->si_addr = (void __user *)regs->nip;
}
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;
	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
			"at %08lx nip %08lx lr %08lx code %x\n";
	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
			"at %016lx nip %016lx lr %016lx code %x\n";

	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return;
	}

	if (show_unhandled_signals && unhandled_signal(current, signr)) {
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, signr,
				   addr, regs->nip, regs->link, code);
	}

	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
		local_irq_enable();

	current->thread.trap_nr = code;
	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);
}
void system_reset_exception(struct pt_regs *regs)
{
	/*
	 * Avoid crashes in case of nested NMI exceptions. Recoverability
	 * is determined by RI and in_nmi
	 */
	bool nested = in_nmi();
	if (!nested)
		nmi_enter();

	__this_cpu_inc(irq_stat.sreset_irqs);

	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			goto out;
	}

	if (debugger(regs))
		goto out;

	/*
	 * A system reset is a request to dump, so we always send
	 * it through the crashdump code (if fadump or kdump are
	 * registered).
	 */
	crash_fadump(regs, "System Reset");

	crash_kexec(regs);

	/*
	 * We aren't the primary crash CPU. We need to send it
	 * to a holding pattern to avoid it ending up in the panic
	 * code.
	 */
	crash_kexec_secondary(regs);

	/*
	 * No debugger or crash dump registered, print logs then
	 * panic.
	 */
	__die("System Reset", regs, SIGABRT);

	mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	nmi_panic(regs, "System Reset");

out:
#ifdef CONFIG_PPC_BOOK3S_64
	BUG_ON(get_paca()->in_nmi == 0);
	if (get_paca()->in_nmi > 1)
		nmi_panic(regs, "Unrecoverable nested System Reset");
#endif
	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		nmi_panic(regs, "Unrecoverable System Reset");

	if (!nested)
		nmi_exit();

	/* What should we do here? We could issue a shutdown or hard reset. */
}
/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.  -- paulus.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == PPC_INST_NOP)
			nip -= 2;
		else if (*nip == PPC_INST_ISYNC)
			--nip;
		if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = extable_fixup(entry);
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}
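
/*
 * On a recovered I/O machine check, the handler above marks the
 * interrupted context recoverable again (MSR_RI) and points regs->nip
 * at the exception table fixup, so the faulting inb/inw/inl-style
 * access is skipped and its wrapper returns through the fixup path
 * instead of oopsing.
 */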
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define REASON_TM		SRR1_PROGTM
#define REASON_FP		SRR1_PROGFPE
#define REASON_ILLEGAL		SRR1_PROGILL
#define REASON_PRIVILEGED	SRR1_PROGPRIV
#define REASON_TRAP		SRR1_PROGTRAP

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif
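
/*
 * These macros give the program check and machine check code below a
 * single way to fetch the fault reason: 4xx/BookE parts report it in
 * the ESR (which the exception entry code saves in regs->dsisr),
 * classic parts in the SRR1 bits captured in regs->msr. The
 * single-step helpers differ the same way (DBCR0 bits vs. MSR_SE).
 */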
#if defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long pvr = mfspr(SPRN_PVR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		printk("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		printk("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check.  We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		/* On e6500 core, L1 DCWS (Data cache write shadow mode) bit
		 * is not implemented but L1 data cache always runs in write
		 * shadow mode. Hence on data cache parity errors HW will
		 * automatically invalidate the L1 Data Cache.
		 */
		if (PVR_VER(pvr) != PVR_VER_E6500) {
			if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
				recoverable = 0;
		}
	}

	if (reason & MCSR_L2MMU_MHIT) {
		printk("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		printk("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		printk("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		printk("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		printk("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		printk("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		printk("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		printk("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		printk("Machine Check %s Address: %#llx\n",
		       reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}
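
/*
 * The MCSR bits are write-one-to-clear on these cores: writing back
 * the value just read acknowledges every logged condition, and a
 * nonzero re-read means another machine check was logged in the
 * meantime, so the event is only reported as recovered when the
 * re-read is 0 and nothing above cleared "recoverable".
 */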
int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
		if (fsl_pci_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		printk("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		printk("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		printk("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		printk("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		printk("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		printk("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		printk("Bus - Write Bus Error on buffered store or cache line push\n");

	return 0;
}
#elif defined(CONFIG_PPC32)
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = regs->msr;

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */
void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;
	bool nested = in_nmi();
	if (!nested)
		nmi_enter();

	/* 64s accounts the mce in machine_check_early when in HVMODE */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !cpu_has_feature(CPU_FTR_HVMODE))
		__this_cpu_inc(irq_stat.mce_exceptions);

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/* See if any machine dependent calls. In theory, we would want
	 * to call the CPU first, and call the ppc_md. one if the CPU
	 * one returns a positive number. However there is existing code
	 * that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		nmi_panic(regs, "Unrecoverable Machine check");

bail:
	if (!nested)
		nmi_exit();
}
void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}
void handle_hmi_exception(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();

	if (ppc_md.handle_hmi_exception)
		ppc_md.handle_hmi_exception(regs);

	irq_exit();
	set_irq_regs(old_regs);
}
void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);

	exception_exit(prev_state);
}
void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}
void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}
void single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);

	if (kprobe_post_handler(regs))
		return;

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(single_step_exception);
/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}
static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}
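
/*
 * Note each branch above pairs an FPSCR exception enable bit
 * (FPSCR_?E) with its corresponding status bit (FPSCR_?X): a signal
 * code is chosen only for an IEEE exception that both occurred and
 * was enabled, which is the condition under which the hardware
 * raises the FP enabled exception in the first place.
 */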
static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fp_state.fpscr);

	_exception(SIGFPE, regs, code, regs->nip);
}
/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
		case PPC_INST_LSWX:
		case PPC_INST_STSWX:
			EA += NB_RB;
			num_bytes = regs->xer & 0x7f;
			break;
		case PPC_INST_LSWI:
		case PPC_INST_STSWI:
			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
			break;
		default:
			return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
			case PPC_INST_LSWX:
			case PPC_INST_LSWI:
				if (get_user(val, (u8 __user *)EA))
					return -EFAULT;
				/* first time updating this reg,
				 * zero it out */
				if (pos == 0)
					regs->gpr[rT] = 0;
				regs->gpr[rT] |= val << shift;
				break;
			case PPC_INST_STSWI:
			case PPC_INST_STSWX:
				val = regs->gpr[rT] >> shift;
				if (put_user(val, (u8 __user *)EA))
					return -EFAULT;
				break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}
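
/*
 * The string instructions move num_bytes bytes between memory and
 * consecutive GPRs, four bytes to a register: lswi/stswi take an
 * immediate length, lswx/stswx take it from XER. The loop above
 * therefore steps pos through the byte lanes of each register and
 * wraps rT from r31 back to r0, matching the architected wrap.
 */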
static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra,rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}
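
/*
 * The three mask-and-add steps above are the classic SWAR popcount,
 * computed for all eight bytes in parallel: sum adjacent bits, then
 * adjacent 2-bit sums, then adjacent 4-bit sums, leaving each byte's
 * population count in that byte, which is exactly what popcntb
 * defines. E.g. for one byte 11011010: pair sums 10 01 01 01,
 * nibble sums 0011 0010, byte sum 00000101 = 5 set bits.
 */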
static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}
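
/*
 * isel rT,rA,rB,BC is a conditional move: rT gets (rA|0) when CR bit
 * BC is set, otherwise rB. As with loads and stores, rA == 0 means
 * the literal value 0 rather than the contents of GPR0, hence the
 * (rA == 0) special case above.
 */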
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction.  This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif
static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn.  */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	/* Emulate sync instruction variants */
	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
		PPC_WARN_EMULATED(sync, regs);
		asm volatile("sync");
		return 0;
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}
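
/*
 * Return convention used by emulate_instruction(): 0 means the
 * instruction was handled (the caller then steps regs->nip past it
 * and replays any pending single step), -EFAULT means a user memory
 * access failed, and any other nonzero value means the instruction
 * was not recognised, in which case the caller falls back to SIGILL.
 */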
int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}
#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
		int code = 0;
		code = __parse_fpscr(current->thread.fp_state.fpscr);
		_exception(SIGFPE, regs, code, regs->nip);
		return 0;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif
void program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		unsigned long bugaddr;
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		if (kprobe_handler(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			goto bail;

		bugaddr = regs->nip;
		/*
		 * Fixup bugaddr for BUG_ON() in real mode
		 */
		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
			bugaddr += PAGE_OFFSET;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *    transition in TM states.
		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non transactional.
		 * -  A tend is illegally attempted.
		 * -  writing a TM SPR when transactional.
		 */
		if (!user_mode(regs) &&
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		/* If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist.  We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
		 */
		if (user_mode(regs)) {
			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
			goto bail;
		} else {
			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
			       "at %lx (msr 0x%x)\n", regs->nip, reason);
			die("Unrecoverable exception", regs, SIGABRT);
		}
	}
#endif

	/*
	 * If we took the program check in the kernel skip down to sending a
	 * SIGILL. The subsequent cases all relate to emulating instructions
	 * which we should only do for userspace. We also do not want to enable
	 * interrupts for kernel faults because that might lead to further
	 * faults, and loose the context of the original exception.
	 */
	if (!user_mode(regs))
		goto sigill;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003
	 */
	if (!emulate_math(regs))
		goto bail;

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			goto bail;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			goto bail;
		}
	}

sigill:
	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(program_check_exception);
/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void emulation_assist_interrupt(struct pt_regs *regs)
{
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}
NOKPROBE_SYMBOL(emulation_assist_interrupt);
void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);

bail:
	exception_exit(prev_state);
}
void slb_miss_bad_addr(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs))
		_exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
	else
		bad_page_fault(regs, regs->dar, SIGSEGV);

	exception_exit(prev_state);
}
void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}
void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}
void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	exception_exit(prev_state);
}
void altivec_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		goto bail;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
	exception_exit(prev_state);
}
void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed an vsx instruction,
		   but this kernel doesn't support vsx. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
#ifdef CONFIG_PPC64
static void tm_unavailable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (user_mode(regs)) {
		current->thread.load_tm++;
		regs->msr |= MSR_TM;
		tm_enable();
		tm_restore_sprs(&current->thread);
		return;
	}
#endif
	pr_emerg("Unrecoverable TM Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
}
void facility_unavailable_exception(struct pt_regs *regs)
{
	static char *facility_strings[] = {
		[FSCR_FP_LG] = "FPU",
		[FSCR_VECVSX_LG] = "VMX/VSX",
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_PM_LG] = "PMU SPRs",
		[FSCR_BHRB_LG] = "BHRB",
		[FSCR_TM_LG] = "TM",
		[FSCR_EBB_LG] = "EBB",
		[FSCR_TAR_LG] = "TAR",
		[FSCR_MSGP_LG] = "MSGP",
		[FSCR_SCV_LG] = "SCV",
	};
	char *facility = "unknown";
	u64 value;
	u32 instword, rd;
	u8 status;
	bool hv;

	hv = (regs->trap == 0xf80);
	if (hv)
		value = mfspr(SPRN_HFSCR);
	else
		value = mfspr(SPRN_FSCR);

	status = value >> 56;
	if (status == FSCR_DSCR_LG) {
		/*
		 * User is accessing the DSCR register using the problem
		 * state only SPR number (0x03) either through a mfspr or
		 * a mtspr instruction. If it is a write attempt through
		 * a mtspr, then we set the inherit bit. This also allows
		 * the user to write or read the register directly in the
		 * future by setting via the FSCR DSCR bit. But in case it
		 * is a read DSCR attempt through a mfspr instruction, we
		 * just emulate the instruction instead. This code path will
		 * always emulate all the mfspr instructions till the user
		 * has attempted at least one mtspr instruction. This way it
		 * preserves the same behaviour when the user is accessing
		 * the DSCR through privilege level only SPR number (0x11)
		 * which is emulated through illegal instruction exception.
		 * We always leave HFSCR DSCR set.
		 */
		if (get_user(instword, (u32 __user *)(regs->nip))) {
			pr_err("Failed to fetch the user instruction\n");
			return;
		}

		/* Write into DSCR (mtspr 0x03, RS) */
		if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
				== PPC_INST_MTSPR_DSCR_USER) {
			rd = (instword >> 21) & 0x1f;
			current->thread.dscr = regs->gpr[rd];
			current->thread.dscr_inherit = 1;
			current->thread.fscr |= FSCR_DSCR;
			mtspr(SPRN_FSCR, current->thread.fscr);
		}

		/* Read from DSCR (mfspr RT, 0x03) */
		if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
				== PPC_INST_MFSPR_DSCR_USER) {
			if (emulate_instruction(regs)) {
				pr_err("DSCR based mfspr emulation failed\n");
				return;
			}
		}
		regs->nip += 4;
		emulate_single_step(regs);
		return;
	}

	if (status == FSCR_TM_LG) {
		/*
		 * If we're here then the hardware is TM aware because it
		 * generated an exception with FSRM_TM set.
		 *
		 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
		 * told us not to do TM, or the kernel is not built with TM
		 * support.
		 *
		 * If both of those things are true, then userspace can spam the
		 * console by triggering the printk() below just by continually
		 * doing tbegin (or any TM instruction). So in that case just
		 * send the process a SIGILL immediately.
		 */
		if (!cpu_has_feature(CPU_FTR_TM))
			goto out;

		tm_unavailable(regs);
		return;
	}

	if ((hv || status >= 2) &&
	    (status < ARRAY_SIZE(facility_strings)) &&
	    facility_strings[status])
		facility = facility_strings[status];

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
		hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);

out:
	if (user_mode(regs)) {
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	die("Unexpected facility unavailable exception", regs, SIGABRT);
}
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

void fp_unavailable_tm(struct pt_regs *regs)
{
	/* Note:  This does not handle any kind of FP laziness. */

	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);

	/* We can only have got here if the task started using FP after
	 * beginning the transaction.  So, the transactional regs are just a
	 * copy of the checkpointed ones.  But, we still need to recheckpoint
	 * as we're enabling FP for the process; it will return, abort the
	 * transaction, and probably retry but now with FP enabled.  So the
	 * checkpointed FP registers need to be loaded.
	 */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	/* Reclaim didn't save out any FPRs to transact_fprs. */

	/* Enable FP for the task: */
	regs->msr |= (MSR_FP | current->thread.fpexc_mode);

	/* This loads and recheckpoints the FP registers from
	 * thread.fpr[].  They will remain in registers after the
	 * checkpoint so we don't need to reload them after.
	 * If VMX is in use, the VRs now hold checkpointed values,
	 * so we don't want to load the VRs from the thread_struct.
	 */
	tm_recheckpoint(&current->thread, MSR_FP);

	/* If VMX is in use, get the transactional values back */
	if (regs->msr & MSR_VEC) {
		msr_check_and_set(MSR_VEC);
		load_vr_state(&current->thread.vr_state);
		/* At this point all the VSX state is loaded, so enable it */
		regs->msr |= MSR_VSX;
	}
}
void altivec_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This function operates
	 * the same way.
	 */

	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	regs->msr |= MSR_VEC;
	tm_recheckpoint(&current->thread, MSR_VEC);
	current->thread.used_vr = 1;

	if (regs->msr & MSR_FP) {
		msr_check_and_set(MSR_FP);
		load_fp_state(&current->thread.fp_state);
		regs->msr |= MSR_VSX;
	}
}
void vsx_unavailable_tm(struct pt_regs *regs)
{
	unsigned long orig_msr = regs->msr;

	/* See the comments in fp_unavailable_tm().  This works similarly,
	 * though we're loading both FP and VEC registers in here.
	 *
	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
	 * regs.  Either way, set MSR_VSX.
	 */

	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);

	current->thread.used_vsr = 1;

	/* If FP and VMX are already loaded, we have all the state we need */
	if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
		regs->msr |= MSR_VSX;
		return;
	}

	/* This reclaims FP and/or VR regs if they're already enabled */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);

	regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
		MSR_VSX;

	/* This loads & recheckpoints FP and VRs; but we have
	 * to be sure not to overwrite previously-valid state.
	 */
	tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);

	msr_check_and_set(orig_msr & (MSR_FP | MSR_VEC));

	if (orig_msr & MSR_FP)
		load_fp_state(&current->thread.fp_state);
	if (orig_msr & MSR_VEC)
		load_vr_state(&current->thread.vr_state);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
void performance_monitor_exception(struct pt_regs *regs)
{
	__this_cpu_inc(irq_stat.pmu_irqs);

	perf_irq(regs);
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler. Torez
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
			     5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
			     6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
			     1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
			     2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
			     3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
			     4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
			       current->thread.debug.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.debug.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}
void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.debug.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_BT;
			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) { 	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
					       current->thread.debug.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
NOKPROBE_SYMBOL(DebugException);
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */
#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vr_state.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */
#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;
	int err;

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
	}
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
	}
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
	}
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}
void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
	}

	return;
}
#endif /* CONFIG_SPE */
/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}
NOKPROBE_SYMBOL(unrecoverable_exception);
#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif
/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}
NOKPROBE_SYMBOL(kernel_bad_stack);
void __init trap_init(void)
{
}
#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(sync),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
	WARN_EMULATED_SETUP(lq_stq),
#endif
};

u32 ppc_warn_emulated;
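
/*
 * ppc_warn_emulated is the "do_warn" debugfs switch created below:
 * when nonzero, PPC_WARN_EMULATED() also prints a rate-limited
 * message via ppc_warn_emulated_print(); the per-instruction
 * counters in ppc_emulated are bumped unconditionally.
 */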
void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}
static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */