/*
 *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
 *  Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  and Paul Mackerras (paulus@samba.org)
 *
 *  This file handles the architecture-dependent parts of hardware exceptions
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/pkeys.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h>	/* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/smp.h>
#include <linux/console.h>
#include <linux/kmsg_dump.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/debugfs.h>
#include <asm/machdep.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <sysdev/fsl_pci.h>
#include <asm/kprobes.h>
#include <asm/stacktrace.h>
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif
/* Transactional Memory trap debug */
#if 0
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif
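/*
 * TM_DEBUG() is used like printk(), e.g. TM_DEBUG("tm_scratch=%llx\n", val);
 * with the "#if 0" above it compiles away to nothing unless the debug
 * branch is switched on by hand.
 */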
static const char *signame(int signr)
{
	switch (signr) {
	case SIGBUS:	return "bus error";
	case SIGFPE:	return "floating point exception";
	case SIGILL:	return "illegal instruction";
	case SIGSEGV:	return "segfault";
	case SIGTRAP:	return "unhandled trap";
	}

	return "unknown signal";
}
/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif
/*
 * If oops/die is expected to crash the machine, return true here.
 *
 * This should not be expected to be 100% accurate, there may be
 * notifiers registered or other unexpected conditions that may bring
 * down the kernel. Or if the current process in the kernel is holding
 * locks or has other critical state, the kernel may become effectively
 * unusable anyway.
 */
bool die_will_crash(void)
{
	if (should_fadump_crash())
		return true;
	if (kexec_should_crash(current))
		return true;
	if (in_interrupt() || panic_on_oops ||
			!current->pid || is_global_init(current))
		return true;

	return false;
}
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;
extern void panic_flush_kmsg_start(void)
{
	/*
	 * These are mostly taken from kernel/panic.c, but tries to do
	 * relatively minimal work. Don't use delay functions (TB may
	 * be broken), don't crash dump (need to set a firmware log),
	 * don't run notifiers. We do want to get some information to
	 * Linux console.
	 */
	console_verbose();
	bust_spinlocks(1);
}

extern void panic_flush_kmsg_end(void)
{
	printk_safe_flush_on_panic();
	kmsg_dump(KMSG_DUMP_PANIC);
	bust_spinlocks(0);
	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
}
static unsigned long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);
static void oops_end(unsigned long flags, struct pt_regs *regs,
		     int signr)
{
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count) {
		/* Nest count reaches zero, release the lock. */
		die_owner = -1;
		arch_spin_unlock(&die_lock);
	}
	raw_local_irq_restore(flags);

	/*
	 * system_reset_exception handles debugger, crash dump, panic, for 0x100
	 */
	if (TRAP(regs) == 0x100)
		return;

	crash_fadump(regs, "die oops");

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC); /* Wait a little while for others to print */
		panic("Fatal exception in interrupt");
	}
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);
static int __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);

	printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s%s %s\n",
	       IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
	       PAGE_SIZE / 1024,
	       early_radix_enabled() ? " MMU=Radix" : "",
	       early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ? " MMU=Hash" : "",
	       IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
	       IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_NUMA) ? " NUMA" : "",
	       ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}
NOKPROBE_SYMBOL(__die);
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags;

	/*
	 * system_reset_exception handles debugger, crash dump, panic, for 0x100
	 */
	if (TRAP(regs) != 0x100) {
		if (debugger(regs))
			return;
	}

	flags = oops_begin(regs);
	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}
NOKPROBE_SYMBOL(die);
void user_single_step_report(struct pt_regs *regs)
{
	force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)regs->nip, current);
}
static void show_signal_msg(int signr, struct pt_regs *regs, int code,
			    unsigned long addr)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (!show_unhandled_signals)
		return;

	if (!unhandled_signal(current, signr))
		return;

	if (!__ratelimit(&rs))
		return;

	pr_info("%s[%d]: %s (%d) at %lx nip %lx lr %lx code %x",
		current->comm, current->pid, signame(signr), signr,
		addr, regs->nip, regs->link, code);

	print_vma_addr(KERN_CONT " in ", regs->nip);

	pr_cont("\n");

	show_user_instructions(regs);
}
static bool exception_common(int signr, struct pt_regs *regs, int code,
			     unsigned long addr)
{
	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return false;
	}

	show_signal_msg(signr, regs, code, addr);

	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
		local_irq_enable();

	current->thread.trap_nr = code;

	/*
	 * Save all the pkey registers AMR/IAMR/UAMOR. Eg: Core dumps need
	 * to capture the content, if the task gets killed.
	 */
	thread_pkey_regs_save(&current->thread);

	return true;
}
void _exception_pkey(struct pt_regs *regs, unsigned long addr, int key)
{
	if (!exception_common(SIGSEGV, regs, SEGV_PKUERR, addr))
		return;

	force_sig_pkuerr((void __user *) addr, key);
}
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	if (!exception_common(signr, regs, code, addr))
		return;

	force_sig_fault(signr, code, (void __user *)addr, current);
}
/*
 * The interrupt architecture has a quirk in that the HV interrupts excluding
 * the NMIs (0x100 and 0x200) do not clear MSR[RI] at entry. The first thing
 * that an interrupt handler must do is save off a GPR into a scratch register,
 * and all interrupts on POWERNV (HV=1) use the HSPRG1 register as scratch.
 * Therefore an NMI can clobber an HV interrupt's live HSPRG1 without noticing
 * that it is non-reentrant, which leads to random data corruption.
 *
 * The solution is for NMI interrupts in HV mode to check if they originated
 * from these critical HV interrupt regions. If so, then mark them not
 * recoverable.
 *
 * An alternative would be for HV NMIs to use SPRG for scratch to avoid the
 * HSPRG1 clobber, however this would cause guest SPRG to be clobbered. Linux
 * guests should always have MSR[RI]=0 when its scratch SPRG is in use, so
 * that would work. However any other guest OS that may have the SPRG live
 * and MSR[RI]=1 could encounter silent corruption.
 *
 * Builds that do not support KVM could take this second option to increase
 * the recoverability of NMIs.
 */
void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_POWERNV
	unsigned long kbase = (unsigned long)_stext;
	unsigned long nip = regs->nip;

	if (!(regs->msr & MSR_RI))
		return;
	if (!(regs->msr & MSR_HV))
		return;
	if (regs->msr & MSR_PR)
		return;

	/*
	 * Now test if the interrupt has hit a range that may be using
	 * HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The
	 * problem ranges all run un-relocated. Test real and virt modes
	 * at the same time by dropping the high bit of the nip (virt mode
	 * entry points still have the +0x4000 offset).
	 */
	nip &= ~0xc000000000000000ULL;
	if ((nip >= 0x500 && nip < 0x600) || (nip >= 0x4500 && nip < 0x4600))
		goto nonrecoverable;
	if ((nip >= 0x980 && nip < 0xa00) || (nip >= 0x4980 && nip < 0x4a00))
		goto nonrecoverable;
	if ((nip >= 0xe00 && nip < 0xec0) || (nip >= 0x4e00 && nip < 0x4ec0))
		goto nonrecoverable;
	if ((nip >= 0xf80 && nip < 0xfa0) || (nip >= 0x4f80 && nip < 0x4fa0))
		goto nonrecoverable;

	/* Trampoline code runs un-relocated so subtract kbase. */
	if (nip >= (unsigned long)(start_real_trampolines - kbase) &&
	    nip < (unsigned long)(end_real_trampolines - kbase))
		goto nonrecoverable;
	if (nip >= (unsigned long)(start_virt_trampolines - kbase) &&
	    nip < (unsigned long)(end_virt_trampolines - kbase))
		goto nonrecoverable;
	return;

nonrecoverable:
	regs->msr &= ~MSR_RI;
#endif
}
void system_reset_exception(struct pt_regs *regs)
{
	unsigned long hsrr0, hsrr1;
	bool nested = in_nmi();
	bool saved_hsrrs = false;

	/*
	 * Avoid crashes in case of nested NMI exceptions. Recoverability
	 * is determined by RI and in_nmi
	 */
	if (!nested)
		nmi_enter();

	/*
	 * System reset can interrupt code where HSRRs are live and MSR[RI]=1.
	 * The system reset interrupt itself may clobber HSRRs (e.g., to call
	 * OPAL), so save them here and restore them before returning.
	 *
	 * Machine checks don't need to save HSRRs, as the real mode handler
	 * is careful to avoid them, and the regular handler is not delivered
	 * as an NMI.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		hsrr0 = mfspr(SPRN_HSRR0);
		hsrr1 = mfspr(SPRN_HSRR1);
		saved_hsrrs = true;
	}

	hv_nmi_check_nonrecoverable(regs);

	__this_cpu_inc(irq_stat.sreset_irqs);

	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			goto out;
	}

	if (debugger(regs))
		goto out;

	kmsg_dump(KMSG_DUMP_OOPS);
	/*
	 * A system reset is a request to dump, so we always send
	 * it through the crashdump code (if fadump or kdump are
	 * registered).
	 */
	crash_fadump(regs, "System Reset");

	crash_kexec(regs);

	/*
	 * We aren't the primary crash CPU. We need to send it
	 * to a holding pattern to avoid it ending up in the panic
	 * code.
	 */
	crash_kexec_secondary(regs);

	/*
	 * No debugger or crash dump registered, print logs then
	 * panic.
	 */
	die("System Reset", regs, SIGABRT);

	mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	nmi_panic(regs, "System Reset");

out:
#ifdef CONFIG_PPC_BOOK3S_64
	BUG_ON(get_paca()->in_nmi == 0);
	if (get_paca()->in_nmi > 1)
		nmi_panic(regs, "Unrecoverable nested System Reset");
#endif
	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		nmi_panic(regs, "Unrecoverable System Reset");

	if (saved_hsrrs) {
		mtspr(SPRN_HSRR0, hsrr0);
		mtspr(SPRN_HSRR1, hsrr1);
	}

	if (!nested)
		nmi_exit();

	/* What should we do here? We could issue a shutdown or hard reset. */
}
/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == PPC_INST_NOP)
			nip -= 2;
		else if (*nip == PPC_INST_ISYNC)
			--nip;
		if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = extable_fixup(entry);
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)
#define clear_br_trace(regs)	do {} while(0)
#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define REASON_TM		SRR1_PROGTM
#define REASON_FP		SRR1_PROGFPE
#define REASON_ILLEGAL		SRR1_PROGILL
#define REASON_PRIVILEGED	SRR1_PROGPRIV
#define REASON_TRAP		SRR1_PROGTRAP

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#define clear_br_trace(regs)	((regs)->msr &= ~MSR_BE)
#endif
#if defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long pvr = mfspr(SPRN_PVR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		pr_cont("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		pr_cont("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check. We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		/* On e6500 core, L1 DCWS (Data cache write shadow mode) bit
		 * is not implemented but L1 data cache always runs in write
		 * shadow mode. Hence on data cache parity errors HW will
		 * automatically invalidate the L1 Data Cache.
		 */
		if (PVR_VER(pvr) != PVR_VER_E6500) {
			if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
				recoverable = 0;
		}
	}

	if (reason & MCSR_L2MMU_MHIT) {
		pr_cont("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		pr_cont("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		pr_cont("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		pr_cont("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		pr_cont("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		pr_cont("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		pr_cont("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		pr_cont("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		pr_cont("Machine Check %s Address: %#llx\n",
			reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}
int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
		if (fsl_pci_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		pr_cont("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		pr_cont("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		pr_cont("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		pr_cont("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		pr_cont("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		pr_cont("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		pr_cont("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		pr_cont("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		pr_cont("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		pr_cont("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		pr_cont("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		pr_cont("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		pr_cont("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		pr_cont("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		pr_cont("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		pr_cont("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		pr_cont("Bus - Write Bus Error on buffered store or cache line push\n");

	return 0;
}
#elif defined(CONFIG_PPC32)
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = regs->msr;

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		pr_cont("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		pr_cont("Transfer error ack signal\n");
		break;
	case 0x20000:
		pr_cont("Data parity error signal\n");
		break;
	case 0x10000:
		pr_cont("Address parity error signal\n");
		break;
	case 0x20000000:
		pr_cont("L1 Data Cache error\n");
		break;
	case 0x40000000:
		pr_cont("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		pr_cont("L2 data cache parity error\n");
		break;
	default:
		pr_cont("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */
void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;
	bool nested = in_nmi();

	if (!nested)
		nmi_enter();

	__this_cpu_inc(irq_stat.mce_exceptions);

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/* See if any machine dependent calls. In theory, we would want
	 * to call the CPU first, and call the ppc_md. one if the CPU
	 * one returns a positive number. However there is existing code
	 * that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	if (!nested)
		nmi_exit();

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		nmi_panic(regs, "Unrecoverable Machine check");

	return;

bail:
	if (!nested)
		nmi_exit();
}
void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}
#ifdef CONFIG_VSX
static void p9_hmi_special_emu(struct pt_regs *regs)
{
	unsigned int ra, rb, t, i, sel, instr, rc;
	const void __user *addr;
	u8 vbuf[16], *vdst;
	unsigned long ea, msr, msr_mask;
	bool swap;

	if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip))
		return;

	/*
	 * lxvb16x	opcode: 0x7c0006d8
	 * lxvd2x	opcode: 0x7c000698
	 * lxvh8x	opcode: 0x7c000658
	 * lxvw4x	opcode: 0x7c000618
	 */
	if ((instr & 0xfc00073e) != 0x7c000618) {
		pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx"
			 " instr=%08x\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr);
		return;
	}

	/* Grab vector registers into the task struct */
	msr = regs->msr; /* Grab msr before we flush the bits */
	flush_vsx_to_thread(current);
	enable_kernel_altivec();

	/*
	 * Is userspace running with a different endian (this is rare but
	 * not impossible)
	 */
	swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE);

	/* Decode the instruction */
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	t = (instr >> 21) & 0x1f;
	if (instr & 1)
		vdst = (u8 *)&current->thread.vr_state.vr[t];
	else
		vdst = (u8 *)&current->thread.fp_state.fpr[t][0];

	/* Grab the vector address */
	ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
	if (is_32bit_task())
		ea &= 0xfffffffful;
	addr = (__force const void __user *)ea;

	/* Check it */
	if (!access_ok(addr, 16)) {
		pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx"
			 " instr=%08x addr=%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, (unsigned long)addr);
		return;
	}

	/* Read the vector */
	rc = 0;
	if ((unsigned long)addr & 0xfUL)
		/* unaligned case */
		rc = __copy_from_user_inatomic(vbuf, addr, 16);
	else
		__get_user_atomic_128_aligned(vbuf, addr, rc);
	if (rc) {
		pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx"
			 " instr=%08x addr=%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, (unsigned long)addr);
		return;
	}

	pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx"
		 " instr=%08x addr=%016lx\n",
		 smp_processor_id(), current->comm, current->pid, regs->nip,
		 instr, (unsigned long) addr);

	/* Grab instruction "selector" */
	sel = (instr >> 6) & 3;

	/*
	 * Check to make sure the facility is actually enabled. This
	 * could happen if we get a false positive hit.
	 *
	 * lxvd2x/lxvw4x always check MSR VSX sel = 0,2
	 * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used sel = 1,3
	 */
	msr_mask = MSR_VSX;
	if ((sel & 1) && (instr & 1)) /* lxvh8x & lxvb16x + VSR >= 32 */
		msr_mask = MSR_VEC;
	if (!(msr & msr_mask)) {
		pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx"
			 " instr=%08x msr:%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, msr);
		return;
	}

	/* Do logging here before we modify sel based on endian */
	switch (sel) {
	case 0:	/* lxvw4x */
		PPC_WARN_EMULATED(lxvw4x, regs);
		break;
	case 1: /* lxvh8x */
		PPC_WARN_EMULATED(lxvh8x, regs);
		break;
	case 2: /* lxvd2x */
		PPC_WARN_EMULATED(lxvd2x, regs);
		break;
	case 3: /* lxvb16x */
		PPC_WARN_EMULATED(lxvb16x, regs);
		break;
	}

#ifdef __LITTLE_ENDIAN__
	/*
	 * An LE kernel stores the vector in the task struct as an LE
	 * byte array (effectively swapping both the components and
	 * the content of the components). Those instructions expect
	 * the components to remain in ascending address order, so we
	 * have to swap them back.
	 *
	 * If we are running a BE user space, the expectation is that
	 * of a simple memcpy, so forcing the emulation to look like
	 * a lxvb16x should do the trick.
	 */
	if (swap)
		sel = 3;

	switch (sel) {
	case 0:	/* lxvw4x */
		for (i = 0; i < 4; i++)
			((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i];
		break;
	case 1: /* lxvh8x */
		for (i = 0; i < 8; i++)
			((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i];
		break;
	case 2: /* lxvd2x */
		for (i = 0; i < 2; i++)
			((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i];
		break;
	case 3: /* lxvb16x */
		for (i = 0; i < 16; i++)
			vdst[i] = vbuf[15-i];
		break;
	}
#else /* __LITTLE_ENDIAN__ */
	/* On a big endian kernel, a BE userspace only needs a memcpy */
	if (!swap)
		sel = 3;

	/* Otherwise, we need to swap the content of the components */
	switch (sel) {
	case 0:	/* lxvw4x */
		for (i = 0; i < 4; i++)
			((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]);
		break;
	case 1: /* lxvh8x */
		for (i = 0; i < 8; i++)
			((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]);
		break;
	case 2: /* lxvd2x */
		for (i = 0; i < 2; i++)
			((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]);
		break;
	case 3: /* lxvb16x */
		memcpy(vdst, vbuf, 16);
		break;
	}
#endif /* !__LITTLE_ENDIAN__ */

	/* Go to next instruction */
	regs->nip += 4;
}
#endif /* CONFIG_VSX */
void handle_hmi_exception(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_VSX
	/* Real mode flagged P9 special emu is needed */
	if (local_paca->hmi_p9_special_emu) {
		local_paca->hmi_p9_special_emu = 0;

		/*
		 * We don't want to take page faults while doing the
		 * emulation, we just replay the instruction if necessary.
		 */
		pagefault_disable();
		p9_hmi_special_emu(regs);
		pagefault_enable();
	}
#endif /* CONFIG_VSX */

	if (ppc_md.handle_hmi_exception)
		ppc_md.handle_hmi_exception(regs);

	irq_exit();
	set_irq_regs(old_regs);
}
void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, TRAP_UNK, 0);

	exception_exit(prev_state);
}
void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}
void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, TRAP_UNK, 0);
}
void single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);
	clear_br_trace(regs);

	if (kprobe_post_handler(regs))
		return;

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(single_step_exception);
/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}
static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = FPE_FLTUNK;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}
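/*
 * For example, a divide-by-zero that the task asked to trap on sets both
 * FPSCR_ZE (the enable bit) and FPSCR_ZX (the sticky status bit), so
 * __parse_fpscr() returns FPE_FLTDIV and the SIGFPE carries that si_code.
 */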
static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fp_state.fpscr);

	_exception(SIGFPE, regs, code, regs->nip);
}
/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 */
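/*
 * "Matching lots of bits" means comparing the whole instruction word
 * against a pattern under a mask, e.g.
 *
 *	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR)
 *
 * rather than pulling the opcode, SPR and register fields apart one by
 * one; only the fields actually needed (such as rD) are extracted later.
 */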
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
		case PPC_INST_LSWX:
		case PPC_INST_STSWX:
			EA += NB_RB;
			num_bytes = regs->xer & 0x7f;
			break;
		case PPC_INST_LSWI:
		case PPC_INST_STSWI:
			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
			break;
		default:
			return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
			case PPC_INST_LSWX:
			case PPC_INST_LSWI:
				if (get_user(val, (u8 __user *)EA))
					return -EFAULT;
				/* first time updating this reg,
				 * zero it out */
				if (pos == 0)
					regs->gpr[rT] = 0;
				regs->gpr[rT] |= val << shift;
				break;
			case PPC_INST_STSWI:
			case PPC_INST_STSWX:
				val = regs->gpr[rT] >> shift;
				if (put_user(val, (u8 __user *)EA))
					return -EFAULT;
				break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}
static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}
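/*
 * The three steps above are the classic SWAR byte-wise popcount that
 * popcntb defines: each byte of gpr[ra] ends up holding the number of
 * 1-bits in the corresponding byte of gpr[rs].  Worked example for a
 * single 0xff byte:
 *   0xff - (0x7f & 0x55) = 0xaa              (each 2-bit field counts 2)
 *   (0xaa & 0x33) + ((0xaa >> 2) & 0x33) = 0x44   (each nibble holds 4)
 *   (0x44 + (0x44 >> 4)) & 0x0f = 0x08            (the byte holds 8)
 */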
static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction.  This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif
static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn.  */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}
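	/*
	 * (mcrxr crfD copies the XER summary bits SO/OV/CA into CR field
	 * crfD and clears them in XER; "(instword >> 21) & 0x1c" is
	 * crfD * 4, the bit offset of that CR field, hence the shifted
	 * 0xf0000000 mask above.)
	 */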
	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	/* Emulate sync instruction variants */
	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
		PPC_WARN_EMULATED(sync, regs);
		asm volatile("sync");
		return 0;
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}
int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}
#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
		int code = 0;
		code = __parse_fpscr(current->thread.fp_state.fpscr);
		_exception(SIGFPE, regs, code, regs->nip);
		return 0;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif
void program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		unsigned long bugaddr;
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		if (kprobe_handler(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			goto bail;

		bugaddr = regs->nip;
		/*
		 * Fixup bugaddr for BUG_ON() in real mode
		 */
		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
			bugaddr += PAGE_OFFSET;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *    transition in TM states.
		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non transactional.
		 * -  A tend is illegally attempted.
		 * -  writing a TM SPR when transactional.
		 *
		 * If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist.  We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
		 */
		if (user_mode(regs)) {
			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
			goto bail;
		} else {
			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
			       "at %lx (msr 0x%lx) tm_scratch=%llx\n",
			       regs->nip, regs->msr, get_paca()->tm_scratch);
			die("Unrecoverable exception", regs, SIGABRT);
		}
	}
#endif

	/*
	 * If we took the program check in the kernel skip down to sending a
	 * SIGILL. The subsequent cases all relate to emulating instructions
	 * which we should only do for userspace. We also do not want to enable
	 * interrupts for kernel faults because that might lead to further
	 * faults, and lose the context of the original exception.
	 */
	if (!user_mode(regs))
		goto sigill;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003
	 */
	if (!emulate_math(regs))
		goto bail;

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			goto bail;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			goto bail;
		}
	}

sigill:
	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(program_check_exception);
/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void emulation_assist_interrupt(struct pt_regs *regs)
{
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}
NOKPROBE_SYMBOL(emulation_assist_interrupt);
void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);

bail:
	exception_exit(prev_state);
}
void StackOverflow(struct pt_regs *regs)
{
	pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n",
		current->comm, task_pid_nr(current), regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}
void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	exception_exit(prev_state);
}
void altivec_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		goto bail;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
	exception_exit(prev_state);
}
void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed a vsx instruction,
		   but this kernel doesn't support vsx. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
#ifdef CONFIG_PPC64
static void tm_unavailable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (user_mode(regs)) {
		current->thread.load_tm++;
		regs->msr |= MSR_TM;
		tm_enable();
		tm_restore_sprs(&current->thread);
		return;
	}
#endif
	pr_emerg("Unrecoverable TM Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
}
void facility_unavailable_exception(struct pt_regs *regs)
{
	static char *facility_strings[] = {
		[FSCR_FP_LG] = "FPU",
		[FSCR_VECVSX_LG] = "VMX/VSX",
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_PM_LG] = "PMU SPRs",
		[FSCR_BHRB_LG] = "BHRB",
		[FSCR_TM_LG] = "TM",
		[FSCR_EBB_LG] = "EBB",
		[FSCR_TAR_LG] = "TAR",
		[FSCR_MSGP_LG] = "MSGP",
		[FSCR_SCV_LG] = "SCV",
	};
	char *facility = "unknown";
	u64 value;
	u32 instword, rd;
	u8 status;
	bool hv;

	hv = (TRAP(regs) == 0xf80);
	if (hv)
		value = mfspr(SPRN_HFSCR);
	else
		value = mfspr(SPRN_FSCR);

	status = value >> 56;
	if ((hv || status >= 2) &&
	    (status < ARRAY_SIZE(facility_strings)) &&
	    facility_strings[status])
		facility = facility_strings[status];

	/* We should not have taken this interrupt in kernel */
	if (!user_mode(regs)) {
		pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n",
			 facility, status, regs->nip);
		die("Unexpected facility unavailable exception", regs, SIGABRT);
	}

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (status == FSCR_DSCR_LG) {
		/*
		 * User is accessing the DSCR register using the problem
		 * state only SPR number (0x03) either through a mfspr or
		 * a mtspr instruction. If it is a write attempt through
		 * a mtspr, then we set the inherit bit. This also allows
		 * the user to write or read the register directly in the
		 * future by setting via the FSCR DSCR bit. But in case it
		 * is a read DSCR attempt through a mfspr instruction, we
		 * just emulate the instruction instead. This code path will
		 * always emulate all the mfspr instructions till the user
		 * has attempted at least one mtspr instruction. This way it
		 * preserves the same behaviour when the user is accessing
		 * the DSCR through privilege level only SPR number (0x11)
		 * which is emulated through illegal instruction exception.
		 * We always leave HFSCR DSCR set.
		 */
		if (get_user(instword, (u32 __user *)(regs->nip))) {
			pr_err("Failed to fetch the user instruction\n");
			return;
		}

		/* Write into DSCR (mtspr 0x03, RS) */
		if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
				== PPC_INST_MTSPR_DSCR_USER) {
			rd = (instword >> 21) & 0x1f;
			current->thread.dscr = regs->gpr[rd];
			current->thread.dscr_inherit = 1;
			current->thread.fscr |= FSCR_DSCR;
			mtspr(SPRN_FSCR, current->thread.fscr);
		}

		/* Read from DSCR (mfspr RT, 0x03) */
		if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
				== PPC_INST_MFSPR_DSCR_USER) {
			if (emulate_instruction(regs)) {
				pr_err("DSCR based mfspr emulation failed\n");
				return;
			}
			regs->nip += 4;
			emulate_single_step(regs);
		}
		return;
	}

	if (status == FSCR_TM_LG) {
		/*
		 * If we're here then the hardware is TM aware because it
		 * generated an exception with FSCR_TM set.
		 *
		 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
		 * told us not to do TM, or the kernel is not built with TM
		 * support.
		 *
		 * If both of those things are true, then userspace can spam the
		 * console by triggering the printk() below just by continually
		 * doing tbegin (or any TM instruction). So in that case just
		 * send the process a SIGILL immediately.
		 */
		if (!cpu_has_feature(CPU_FTR_TM))
			goto out;

		tm_unavailable(regs);
		return;
	}

	pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
		hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);

out:
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

void fp_unavailable_tm(struct pt_regs *regs)
{
	/* Note:  This does not handle any kind of FP laziness. */

	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);

	/* We can only have got here if the task started using FP after
	 * beginning the transaction.  So, the transactional regs are just a
	 * copy of the checkpointed ones.  But, we still need to recheckpoint
	 * as we're enabling FP for the process; it will return, abort the
	 * transaction, and probably retry but now with FP enabled.  So the
	 * checkpointed FP registers need to be loaded.
	 */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);

	/*
	 * Reclaim initially saved out bogus (lazy) FPRs to ckfp_state, and
	 * then it was overwritten by the thr->fp_state by tm_reclaim_thread().
	 *
	 * At this point, ck{fp,vr}_state contains the exact values we want to
	 * recheckpoint.
	 */

	/* Enable FP for the task: */
	current->thread.load_fp = 1;

	/*
	 * Recheckpoint all the checkpointed ckpt, ck{fp, vr}_state registers.
	 */
	tm_recheckpoint(&current->thread);
}

void altivec_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This function operates
	 * the same way.
	 */

	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	current->thread.load_vec = 1;
	tm_recheckpoint(&current->thread);
	current->thread.used_vr = 1;
}

void vsx_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This works similarly,
	 * though we're loading both FP and VEC registers in here.
	 *
	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
	 * regs.  Either way, set MSR_VSX.
	 */

	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);

	current->thread.used_vsr = 1;

	/* This reclaims FP and/or VR regs if they're already enabled */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);

	current->thread.load_vec = 1;
	current->thread.load_fp = 1;

	tm_recheckpoint(&current->thread);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
void performance_monitor_exception(struct pt_regs *regs)
{
	__this_cpu_inc(irq_stat.pmu_irqs);

	perf_irq(regs);
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler. Torez
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status,
			     5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status,
			     6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status,
			     1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status,
			     2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status,
			     3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status,
			     4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
			       current->thread.debug.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.debug.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}
void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.debug.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_BT;
			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) { 	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
					       current->thread.debug.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
NOKPROBE_SYMBOL(DebugException);
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_INT_TAU */
#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vr_state.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */
#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = FPE_FLTUNK;
	int err;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
	}
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
	}
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
	}
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}
void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, FPE_FLTUNK, regs->nip);
	}

	return;
}
#endif /* CONFIG_SPE */
/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
		 regs->trap, regs->nip, regs->msr);
	die("Unrecoverable exception", regs, SIGABRT);
}
NOKPROBE_SYMBOL(unrecoverable_exception);
#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif
/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}
NOKPROBE_SYMBOL(kernel_bad_stack);
void __init trap_init(void)
{
}
#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }
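/* e.g. WARN_EMULATED_SETUP(popcntb) expands to .popcntb = { .name = "popcntb" } */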
struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(sync),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
	WARN_EMULATED_SETUP(lq_stq),
	WARN_EMULATED_SETUP(lxvw4x),
	WARN_EMULATED_SETUP(lxvh8x),
	WARN_EMULATED_SETUP(lxvd2x),
	WARN_EMULATED_SETUP(lxvb16x),
#endif
};

u32 ppc_warn_emulated;
void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", 0644, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, 0644, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */