/*
 * Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h>	/* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/smp.h>
#include <linux/console.h>
#include <linux/kmsg_dump.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>
#include <asm/kprobes.h>
#include <asm/stacktrace.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif

static const char *signame(int signr)
{
	switch (signr) {
	case SIGBUS:	return "bus error";
	case SIGFPE:	return "floating point exception";
	case SIGILL:	return "illegal instruction";
	case SIGSEGV:	return "segfault";
	case SIGTRAP:	return "unhandled trap";
	}

	return "unknown signal";
}

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

/*
 * If oops/die is expected to crash the machine, return true here.
 *
 * This should not be expected to be 100% accurate, there may be
 * notifiers registered or other unexpected conditions that may bring
 * down the kernel. Or if the current process in the kernel is holding
 * locks or has other critical state, the kernel may become effectively
 * unusable anyway.
 */
bool die_will_crash(void)
{
	if (should_fadump_crash())
		return true;
	if (kexec_should_crash(current))
		return true;
	if (in_interrupt() || panic_on_oops ||
			!current->pid || is_global_init(current))
		return true;

	return false;
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

extern void panic_flush_kmsg_start(void)
{
	/*
	 * These are mostly taken from kernel/panic.c, but tries to do
	 * relatively minimal work. Don't use delay functions (TB may
	 * be broken), don't crash dump (need to set a firmware log),
	 * don't run notifiers. We do want to get some information to
	 * Linux console.
	 */
	console_verbose();
	bust_spinlocks(1);
}

extern void panic_flush_kmsg_end(void)
{
	printk_safe_flush_on_panic();
	kmsg_dump(KMSG_DUMP_PANIC);
	bust_spinlocks(0);
	debug_locks_off();
	console_flush_on_panic();
}

static unsigned long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);

static void oops_end(unsigned long flags, struct pt_regs *regs,
			       int signr)
{
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count) {
		/* Nest count reaches zero, release the lock. */
		die_owner = -1;
		arch_spin_unlock(&die_lock);
	}
	raw_local_irq_restore(flags);

	/*
	 * system_reset_exception handles debugger, crash dump, panic, for 0x100
	 */
	if (TRAP(regs) == 0x100)
		return;

	crash_fadump(regs, "die oops");

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

static int __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);

	printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s%s %s\n",
	       IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
	       PAGE_SIZE / 1024,
	       early_radix_enabled() ? " MMU=Radix" : "",
	       early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ? " MMU=Hash" : "",
	       IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
	       IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_NUMA) ? " NUMA" : "",
	       ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}
NOKPROBE_SYMBOL(__die);

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags;

	/*
	 * system_reset_exception handles debugger, crash dump, panic, for 0x100
	 */
	if (TRAP(regs) != 0x100) {
		if (debugger(regs))
			return;
	}

	flags = oops_begin(regs);
	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}
NOKPROBE_SYMBOL(die);

void user_single_step_report(struct pt_regs *regs)
{
	force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)regs->nip, current);
}

static void show_signal_msg(int signr, struct pt_regs *regs, int code,
			    unsigned long addr)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (!show_unhandled_signals)
		return;

	if (!unhandled_signal(current, signr))
		return;

	if (!__ratelimit(&rs))
		return;

	pr_info("%s[%d]: %s (%d) at %lx nip %lx lr %lx code %x",
		current->comm, current->pid, signame(signr), signr,
		addr, regs->nip, regs->link, code);

	print_vma_addr(KERN_CONT " in ", regs->nip);

	pr_cont("\n");

	show_user_instructions(regs);
}

static bool exception_common(int signr, struct pt_regs *regs, int code,
			      unsigned long addr)
{
	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return false;
	}

	show_signal_msg(signr, regs, code, addr);

	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
		local_irq_enable();

	current->thread.trap_nr = code;

	/*
	 * Save all the pkey registers AMR/IAMR/UAMOR. Eg: Core dumps need
	 * to capture the content, if the task gets killed.
	 */
	thread_pkey_regs_save(&current->thread);

	return true;
}

void _exception_pkey(struct pt_regs *regs, unsigned long addr, int key)
{
	if (!exception_common(SIGSEGV, regs, SEGV_PKUERR, addr))
		return;

	force_sig_pkuerr((void __user *) addr, key);
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	if (!exception_common(signr, regs, code, addr))
		return;

	force_sig_fault(signr, code, (void __user *)addr, current);
}

/*
 * The interrupt architecture has a quirk in that the HV interrupts excluding
 * the NMIs (0x100 and 0x200) do not clear MSR[RI] at entry. The first thing
 * that an interrupt handler must do is save off a GPR into a scratch register,
 * and all interrupts on POWERNV (HV=1) use the HSPRG1 register as scratch.
 * Therefore an NMI can clobber an HV interrupt's live HSPRG1 without noticing
 * that it is non-reentrant, which leads to random data corruption.
 *
 * The solution is for NMI interrupts in HV mode to check if they originated
 * from these critical HV interrupt regions. If so, then mark them not
 * recoverable.
 *
 * An alternative would be for HV NMIs to use SPRG for scratch to avoid the
 * HSPRG1 clobber, however this would cause guest SPRG to be clobbered. Linux
 * guests should always have MSR[RI]=0 when its scratch SPRG is in use, so
 * that would work. However any other guest OS that may have the SPRG live
 * and MSR[RI]=1 could encounter silent corruption.
 *
 * Builds that do not support KVM could take this second option to increase
 * the recoverability of NMIs.
 */
void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_POWERNV
	unsigned long kbase = (unsigned long)_stext;
	unsigned long nip = regs->nip;

	if (!(regs->msr & MSR_RI))
		return;
	if (!(regs->msr & MSR_HV))
		return;
	if (regs->msr & MSR_PR)
		return;

	/*
	 * Now test if the interrupt has hit a range that may be using
	 * HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The
	 * problem ranges all run un-relocated. Test real and virt modes
	 * at the same time by dropping the high bit of the nip (virt mode
	 * entry points still have the +0x4000 offset).
	 */
	nip &= ~0xc000000000000000ULL;
	if ((nip >= 0x500 && nip < 0x600) || (nip >= 0x4500 && nip < 0x4600))
		goto nonrecoverable;
	if ((nip >= 0x980 && nip < 0xa00) || (nip >= 0x4980 && nip < 0x4a00))
		goto nonrecoverable;
	if ((nip >= 0xe00 && nip < 0xec0) || (nip >= 0x4e00 && nip < 0x4ec0))
		goto nonrecoverable;
	if ((nip >= 0xf80 && nip < 0xfa0) || (nip >= 0x4f80 && nip < 0x4fa0))
		goto nonrecoverable;

	/* Trampoline code runs un-relocated so subtract kbase. */
	if (nip >= (unsigned long)(start_real_trampolines - kbase) &&
			nip < (unsigned long)(end_real_trampolines - kbase))
		goto nonrecoverable;
	if (nip >= (unsigned long)(start_virt_trampolines - kbase) &&
			nip < (unsigned long)(end_virt_trampolines - kbase))
		goto nonrecoverable;
	return;

nonrecoverable:
	regs->msr &= ~MSR_RI;
#endif
}

void system_reset_exception(struct pt_regs *regs)
{
	unsigned long hsrr0, hsrr1;
	bool nested = in_nmi();
	bool saved_hsrrs = false;

	/*
	 * Avoid crashes in case of nested NMI exceptions. Recoverability
	 * is determined by RI and in_nmi
	 */
	if (!nested)
		nmi_enter();

	/*
	 * System reset can interrupt code where HSRRs are live and MSR[RI]=1.
	 * The system reset interrupt itself may clobber HSRRs (e.g., to call
	 * OPAL), so save them here and restore them before returning.
	 *
	 * Machine checks don't need to save HSRRs, as the real mode handler
	 * is careful to avoid them, and the regular handler is not delivered
	 * as an NMI.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		hsrr0 = mfspr(SPRN_HSRR0);
		hsrr1 = mfspr(SPRN_HSRR1);
		saved_hsrrs = true;
	}

	hv_nmi_check_nonrecoverable(regs);

	__this_cpu_inc(irq_stat.sreset_irqs);

	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			goto out;
	}

	if (debugger(regs))
		goto out;

	/*
	 * A system reset is a request to dump, so we always send
	 * it through the crashdump code (if fadump or kdump are
	 * registered).
	 */
	crash_fadump(regs, "System Reset");

	crash_kexec(regs);

	/*
	 * We aren't the primary crash CPU. We need to send it
	 * to a holding pattern to avoid it ending up in the panic
	 * code.
	 */
	crash_kexec_secondary(regs);

	/*
	 * No debugger or crash dump registered, print logs then
	 * panic.
	 */
	die("System Reset", regs, SIGABRT);

	mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	nmi_panic(regs, "System Reset");

out:
#ifdef CONFIG_PPC_BOOK3S_64
	BUG_ON(get_paca()->in_nmi == 0);
	if (get_paca()->in_nmi > 1)
		nmi_panic(regs, "Unrecoverable nested System Reset");
#endif
	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		nmi_panic(regs, "Unrecoverable System Reset");

	if (saved_hsrrs) {
		mtspr(SPRN_HSRR0, hsrr0);
		mtspr(SPRN_HSRR1, hsrr1);
	}

	if (!nested)
		nmi_exit();

	/* What should we do here? We could issue a shutdown or hard reset. */
}

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == PPC_INST_NOP)
			nip -= 2;
		else if (*nip == PPC_INST_ISYNC)
			--nip;
		if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = extable_fixup(entry);
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)
#define clear_br_trace(regs)	do {} while(0)
#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define REASON_TM		SRR1_PROGTM
#define REASON_FP		SRR1_PROGFPE
#define REASON_ILLEGAL		SRR1_PROGILL
#define REASON_PRIVILEGED	SRR1_PROGPRIV
#define REASON_TRAP		SRR1_PROGTRAP

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#define clear_br_trace(regs)	((regs)->msr &= ~MSR_BE)
#endif

#if defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long pvr = mfspr(SPRN_PVR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		pr_cont("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		pr_cont("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check. We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		/* On e6500 core, L1 DCWS (Data cache write shadow mode) bit
		 * is not implemented but L1 data cache always runs in write
		 * shadow mode. Hence on data cache parity errors HW will
		 * automatically invalidate the L1 Data Cache.
		 */
		if (PVR_VER(pvr) != PVR_VER_E6500) {
			if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
				recoverable = 0;
		}
	}

	if (reason & MCSR_L2MMU_MHIT) {
		pr_cont("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		pr_cont("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		pr_cont("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		pr_cont("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		pr_cont("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		pr_cont("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		pr_cont("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		pr_cont("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		pr_cont("Machine Check %s Address: %#llx\n",
			reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}

int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
		if (fsl_pci_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		pr_cont("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		pr_cont("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		pr_cont("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		pr_cont("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		pr_cont("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		pr_cont("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		pr_cont("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		pr_cont("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		pr_cont("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		pr_cont("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		pr_cont("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		pr_cont("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		pr_cont("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		pr_cont("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		pr_cont("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		pr_cont("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		pr_cont("Bus - Write Bus Error on buffered store or cache line push\n");

	return 0;
}
#elif defined(CONFIG_PPC32)
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = regs->msr;

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		pr_cont("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		pr_cont("Transfer error ack signal\n");
		break;
	case 0x20000:
		pr_cont("Data parity error signal\n");
		break;
	case 0x10000:
		pr_cont("Address parity error signal\n");
		break;
	case 0x20000000:
		pr_cont("L1 Data Cache error\n");
		break;
	case 0x40000000:
		pr_cont("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		pr_cont("L2 data cache parity error\n");
		break;
	default:
		pr_cont("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */

void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;
	bool nested = in_nmi();
	if (!nested)
		nmi_enter();

	__this_cpu_inc(irq_stat.mce_exceptions);

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/* See if any machine dependent calls. In theory, we would want
	 * to call the CPU first, and call the ppc_md. one if the CPU
	 * one returns a positive number. However there is existing code
	 * that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	if (!nested)
		nmi_exit();

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		nmi_panic(regs, "Unrecoverable Machine check");

	return;

bail:
	if (!nested)
		nmi_exit();
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

#ifdef CONFIG_VSX
static void p9_hmi_special_emu(struct pt_regs *regs)
{
	unsigned int ra, rb, t, i, sel, instr, rc;
	const void __user *addr;
	u8 vbuf[16], *vdst;
	unsigned long ea, msr, msr_mask;
	bool swap;

	if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip))
		return;

	/*
	 * lxvb16x	opcode: 0x7c0006d8
	 * lxvd2x	opcode: 0x7c000698
	 * lxvh8x	opcode: 0x7c000658
	 * lxvw4x	opcode: 0x7c000618
	 */
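	/*
	 * The four load opcodes above differ only in extended-opcode bits
	 * 0x0c0 (read back below as "sel") and in the low TX bit (instr & 1)
	 * that selects VSRs 32-63, so masking with 0xfc00073e lets a single
	 * compare against the lxvw4x pattern match any of them.
	 */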
	if ((instr & 0xfc00073e) != 0x7c000618) {
		pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx"
			 " instr=%08x\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr);
		return;
	}

	/* Grab vector registers into the task struct */
	msr = regs->msr; /* Grab msr before we flush the bits */
	flush_vsx_to_thread(current);
	enable_kernel_altivec();

	/*
	 * Is userspace running with a different endian (this is rare but
	 * not impossible)
	 */
	swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE);

	/* Decode the instruction */
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	t = (instr >> 21) & 0x1f;
	if (instr & 1)
		vdst = (u8 *)&current->thread.vr_state.vr[t];
	else
		vdst = (u8 *)&current->thread.fp_state.fpr[t][0];

	/* Grab the vector address */
	ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
	if (is_32bit_task())
		ea &= 0xfffffffful;
	addr = (__force const void __user *)ea;

	/* Check it */
	if (!access_ok(addr, 16)) {
		pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx"
			 " instr=%08x addr=%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, (unsigned long)addr);
		return;
	}

	/* Read the vector */
	rc = 0;
	if ((unsigned long)addr & 0xfUL)
		/* unaligned case */
		rc = __copy_from_user_inatomic(vbuf, addr, 16);
	else
		__get_user_atomic_128_aligned(vbuf, addr, rc);
	if (rc) {
		pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx"
			 " instr=%08x addr=%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, (unsigned long)addr);
		return;
	}

	pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx"
		 " instr=%08x addr=%016lx\n",
		 smp_processor_id(), current->comm, current->pid, regs->nip,
		 instr, (unsigned long) addr);

	/* Grab instruction "selector" */
	sel = (instr >> 6) & 3;

	/*
	 * Check to make sure the facility is actually enabled. This
	 * could happen if we get a false positive hit.
	 *
	 * lxvd2x/lxvw4x always check MSR VSX sel = 0,2
	 * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used sel = 1,3
	 */
	msr_mask = MSR_VSX;
	if ((sel & 1) && (instr & 1))	/* lxvh8x & lxvb16x + VSR >= 32 */
		msr_mask = MSR_VEC;
	if (!(msr & msr_mask)) {
		pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx"
			 " instr=%08x msr:%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, msr);
		return;
	}

	/* Do logging here before we modify sel based on endian */
	switch (sel) {
	case 0:	/* lxvw4x */
		PPC_WARN_EMULATED(lxvw4x, regs);
		break;
	case 1: /* lxvh8x */
		PPC_WARN_EMULATED(lxvh8x, regs);
		break;
	case 2: /* lxvd2x */
		PPC_WARN_EMULATED(lxvd2x, regs);
		break;
	case 3: /* lxvb16x */
		PPC_WARN_EMULATED(lxvb16x, regs);
		break;
	}

#ifdef __LITTLE_ENDIAN__
	/*
	 * An LE kernel stores the vector in the task struct as an LE
	 * byte array (effectively swapping both the components and
	 * the content of the components). Those instructions expect
	 * the components to remain in ascending address order, so we
	 * swap them back.
	 *
	 * If we are running a BE user space, the expectation is that
	 * of a simple memcpy, so forcing the emulation to look like
	 * a lxvb16x should do the trick.
	 */
	if (swap)
		sel = 3;

	switch (sel) {
	case 0:	/* lxvw4x */
		for (i = 0; i < 4; i++)
			((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i];
		break;
	case 1: /* lxvh8x */
		for (i = 0; i < 8; i++)
			((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i];
		break;
	case 2: /* lxvd2x */
		for (i = 0; i < 2; i++)
			((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i];
		break;
	case 3: /* lxvb16x */
		for (i = 0; i < 16; i++)
			vdst[i] = vbuf[15-i];
		break;
	}
#else /* __LITTLE_ENDIAN__ */
	/* On a big endian kernel, a BE userspace only needs a memcpy */
	if (!swap)
		sel = 3;

	/* Otherwise, we need to swap the content of the components */
	switch (sel) {
	case 0:	/* lxvw4x */
		for (i = 0; i < 4; i++)
			((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]);
		break;
	case 1: /* lxvh8x */
		for (i = 0; i < 8; i++)
			((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]);
		break;
	case 2: /* lxvd2x */
		for (i = 0; i < 2; i++)
			((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]);
		break;
	case 3: /* lxvb16x */
		memcpy(vdst, vbuf, 16);
		break;
	}
#endif /* !__LITTLE_ENDIAN__ */

	/* Go to next instruction */
	regs->nip += 4;
}
#endif /* CONFIG_VSX */

void handle_hmi_exception(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_VSX
	/* Real mode flagged P9 special emu is needed */
	if (local_paca->hmi_p9_special_emu) {
		local_paca->hmi_p9_special_emu = 0;

		/*
		 * We don't want to take page faults while doing the
		 * emulation, we just replay the instruction if necessary.
		 */
		pagefault_disable();
		p9_hmi_special_emu(regs);
		pagefault_enable();
	}
#endif /* CONFIG_VSX */

	if (ppc_md.handle_hmi_exception)
		ppc_md.handle_hmi_exception(regs);

	irq_exit();
	set_irq_regs(old_regs);
}

void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, TRAP_UNK, 0);

	exception_exit(prev_state);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, TRAP_UNK, 0);
}

void single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);
	clear_br_trace(regs);

	if (kprobe_post_handler(regs))
		return;

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(single_step_exception);

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}

static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = FPE_FLTUNK;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fp_state.fpscr);

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 *
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
		case PPC_INST_LSWX:
		case PPC_INST_STSWX:
			EA += NB_RB;
			num_bytes = regs->xer & 0x7f;
			break;
		case PPC_INST_LSWI:
		case PPC_INST_STSWI:
			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
			break;
		default:
			return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
			case PPC_INST_LSWX:
			case PPC_INST_LSWI:
				if (get_user(val, (u8 __user *)EA))
					return -EFAULT;
				/* first time updating this reg,
				 * zero it out */
				if (pos == 0)
					regs->gpr[rT] = 0;
				regs->gpr[rT] |= val << shift;
				break;
			case PPC_INST_STSWI:
			case PPC_INST_STSWX:
				val = regs->gpr[rT] >> shift;
				if (put_user(val, (u8 __user *)EA))
					return -EFAULT;
				break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}

static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra,rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
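	/*
	 * SWAR (SIMD-within-a-register) byte-wise population count: the
	 * three steps below sum bits in pairs, then in nibbles, then merge
	 * the two nibble counts of each byte, leaving each byte of 'tmp'
	 * holding the number of set bits in the corresponding byte of
	 * GPR[rs] -- exactly what popcntb defines.
	 */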
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}

static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction.  This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif

static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn.  */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
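		/*
		 * This extracts the mcrxr BF (destination CR field) number
		 * from the instruction, pre-multiplied by four, so "msk"
		 * below lines up with that 4-bit CR field; the top four XER
		 * bits (SO, OV, CA) are copied into it and then cleared
		 * from XER.
		 */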
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	/* Emulate sync instruction variants */
	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
		PPC_WARN_EMULATED(sync, regs);
		asm volatile("sync");
		return 0;
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
			int code = 0;
			code = __parse_fpscr(current->thread.fp_state.fpscr);
			_exception(SIGFPE, regs, code, regs->nip);
			return 0;
		}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif

void program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		unsigned long bugaddr;
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		if (kprobe_handler(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			goto bail;

		bugaddr = regs->nip;
		/*
		 * Fixup bugaddr for BUG_ON() in real mode
		 */
		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
			bugaddr += PAGE_OFFSET;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *    transition in TM states.
		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non transactional.
		 * -  A tend is illegally attempted.
		 * -  writing a TM SPR when transactional.
		 *
		 * If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist.  We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
		 */
		if (user_mode(regs)) {
			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
			goto bail;
		} else {
			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
			       "at %lx (msr 0x%lx) tm_scratch=%llx\n",
			       regs->nip, regs->msr, get_paca()->tm_scratch);
			die("Unrecoverable exception", regs, SIGABRT);
		}
	}
#endif

	/*
	 * If we took the program check in the kernel skip down to sending a
	 * SIGILL. The subsequent cases all relate to emulating instructions
	 * which we should only do for userspace. We also do not want to enable
	 * interrupts for kernel faults because that might lead to further
	 * faults, and lose the context of the original exception.
	 */
	if (!user_mode(regs))
		goto sigill;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003
	 */
	if (!emulate_math(regs))
		goto bail;

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			goto bail;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			goto bail;
		}
	}

sigill:
	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(program_check_exception);

/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void emulation_assist_interrupt(struct pt_regs *regs)
{
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}
NOKPROBE_SYMBOL(emulation_assist_interrupt);

void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);

bail:
	exception_exit(prev_state);
}

void StackOverflow(struct pt_regs *regs)
{
	pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n",
		current->comm, task_pid_nr(current), regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	exception_exit(prev_state);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		goto bail;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
	exception_exit(prev_state);
}

void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed a vsx instruction,
		   but this kernel doesn't support vsx. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
1678
2517617e 1679#ifdef CONFIG_PPC64
172f7aaa
CB
1680static void tm_unavailable(struct pt_regs *regs)
1681{
5d176f75
CB
1682#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1683 if (user_mode(regs)) {
1684 current->thread.load_tm++;
1685 regs->msr |= MSR_TM;
1686 tm_enable();
1687 tm_restore_sprs(&current->thread);
1688 return;
1689 }
1690#endif
172f7aaa
CB
1691 pr_emerg("Unrecoverable TM Unavailable Exception "
1692 "%lx at %lx\n", regs->trap, regs->nip);
1693 die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
1694}
1695
021424a1 1696void facility_unavailable_exception(struct pt_regs *regs)
d0c0c9a1 1697{
021424a1 1698 static char *facility_strings[] = {
2517617e
MN
1699 [FSCR_FP_LG] = "FPU",
1700 [FSCR_VECVSX_LG] = "VMX/VSX",
1701 [FSCR_DSCR_LG] = "DSCR",
1702 [FSCR_PM_LG] = "PMU SPRs",
1703 [FSCR_BHRB_LG] = "BHRB",
1704 [FSCR_TM_LG] = "TM",
1705 [FSCR_EBB_LG] = "EBB",
1706 [FSCR_TAR_LG] = "TAR",
794464f4 1707 [FSCR_MSGP_LG] = "MSGP",
9b7ff0c6 1708 [FSCR_SCV_LG] = "SCV",
021424a1 1709 };
2517617e 1710 char *facility = "unknown";
021424a1 1711 u64 value;
c952c1c4 1712 u32 instword, rd;
2517617e
MN
1713 u8 status;
1714 bool hv;
021424a1 1715
2271db20 1716 hv = (TRAP(regs) == 0xf80);
2517617e 1717 if (hv)
b14b6260 1718 value = mfspr(SPRN_HFSCR);
2517617e
MN
1719 else
1720 value = mfspr(SPRN_FSCR);
1721
1722 status = value >> 56;
709b973c
AK
1723 if ((hv || status >= 2) &&
1724 (status < ARRAY_SIZE(facility_strings)) &&
1725 facility_strings[status])
1726 facility = facility_strings[status];
1727
1728 /* We should not have taken this interrupt in kernel */
1729 if (!user_mode(regs)) {
1730 pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n",
1731 facility, status, regs->nip);
1732 die("Unexpected facility unavailable exception", regs, SIGABRT);
1733 }
1734
1735 /* We restore the interrupt state now */
1736 if (!arch_irq_disabled_regs(regs))
1737 local_irq_enable();
1738
2517617e 1739 if (status == FSCR_DSCR_LG) {
c952c1c4
AK
1740 /*
1741 * User is accessing the DSCR register using the problem
1742 * state only SPR number (0x03) either through a mfspr or
1743 * a mtspr instruction. If it is a write attempt through
1744 * a mtspr, then we set the inherit bit. This also allows
1745 * the user to write or read the register directly in the
1746 * future by setting via the FSCR DSCR bit. But in case it
1747 * is a read DSCR attempt through a mfspr instruction, we
1748 * just emulate the instruction instead. This code path will
 1749	 * always emulate all the mfspr instructions until the user
446957ba 1750 * has attempted at least one mtspr instruction. This way it
c952c1c4
AK
1751 * preserves the same behaviour when the user is accessing
1752 * the DSCR through privilege level only SPR number (0x11)
1753 * which is emulated through illegal instruction exception.
1754 * We always leave HFSCR DSCR set.
2517617e 1755 */
c952c1c4
AK
1756 if (get_user(instword, (u32 __user *)(regs->nip))) {
1757 pr_err("Failed to fetch the user instruction\n");
1758 return;
1759 }
1760
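		/*
		 * mtspr/mfspr carry the SPR number as a split field inside the
		 * instruction word; the PPC_INST_*_DSCR_USER masks below match
		 * that encoding for the user DSCR (SPR 0x03), and the RS/RT
		 * register sits in the five bits just below the primary opcode,
		 * hence the (instword >> 21) & 0x1f extraction.
		 */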
1761 /* Write into DSCR (mtspr 0x03, RS) */
1762 if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
1763 == PPC_INST_MTSPR_DSCR_USER) {
1764 rd = (instword >> 21) & 0x1f;
1765 current->thread.dscr = regs->gpr[rd];
1766 current->thread.dscr_inherit = 1;
b57bd2de
MN
1767 current->thread.fscr |= FSCR_DSCR;
1768 mtspr(SPRN_FSCR, current->thread.fscr);
c952c1c4
AK
1769 }
1770
1771 /* Read from DSCR (mfspr RT, 0x03) */
1772 if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
1773 == PPC_INST_MFSPR_DSCR_USER) {
1774 if (emulate_instruction(regs)) {
1775 pr_err("DSCR based mfspr emulation failed\n");
1776 return;
1777 }
1778 regs->nip += 4;
1779 emulate_single_step(regs);
1780 }
2517617e 1781 return;
b14b6260
ME
1782 }
1783
172f7aaa
CB
1784 if (status == FSCR_TM_LG) {
1785 /*
1786 * If we're here then the hardware is TM aware because it
 1787	 * generated an exception with FSCR_TM set.
1788 *
1789 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
1790 * told us not to do TM, or the kernel is not built with TM
1791 * support.
1792 *
1793 * If both of those things are true, then userspace can spam the
1794 * console by triggering the printk() below just by continually
1795 * doing tbegin (or any TM instruction). So in that case just
1796 * send the process a SIGILL immediately.
1797 */
1798 if (!cpu_has_feature(CPU_FTR_TM))
1799 goto out;
1800
1801 tm_unavailable(regs);
1802 return;
1803 }
1804
93c2ec0f
BS
1805 pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
1806 hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);
d0c0c9a1 1807
172f7aaa 1808out:
709b973c 1809 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
d0c0c9a1 1810}
2517617e 1811#endif
d0c0c9a1 1812
f54db641
MN
1813#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1814
f54db641
MN
1815void fp_unavailable_tm(struct pt_regs *regs)
1816{
1817 /* Note: This does not handle any kind of FP laziness. */
1818
1819 TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
1820 regs->nip, regs->msr);
f54db641
MN
1821
1822 /* We can only have got here if the task started using FP after
1823 * beginning the transaction. So, the transactional regs are just a
1824 * copy of the checkpointed ones. But, we still need to recheckpoint
1825 * as we're enabling FP for the process; it will return, abort the
1826 * transaction, and probably retry but now with FP enabled. So the
1827 * checkpointed FP registers need to be loaded.
1828 */
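	/*
	 * tm_reclaim_current() rolls the transaction back and captures the
	 * checkpointed register state; tm_recheckpoint() further down reloads
	 * it, so the transaction aborts and can be retried with FP enabled
	 * once we return.
	 */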
d31626f7 1829 tm_reclaim_current(TM_CAUSE_FAC_UNAV);
96695563
BL
1830
1831 /*
 1832	 * Reclaim initially saved the bogus (lazy) FPRs to ckfp_state, and
 1833	 * ckfp_state was then overwritten with thr->fp_state by tm_reclaim_thread().
1834 *
1835 * At this point, ck{fp,vr}_state contains the exact values we want to
1836 * recheckpoint.
1837 */
f54db641
MN
1838
1839 /* Enable FP for the task: */
a7771176 1840 current->thread.load_fp = 1;
f54db641 1841
96695563
BL
1842 /*
 1843	 * Recheckpoint all the checkpointed registers (ckpt_regs, ck{fp,vr}_state).
f54db641 1844 */
eb5c3f1c 1845 tm_recheckpoint(&current->thread);
f54db641
MN
1846}
1847
f54db641
MN
1848void altivec_unavailable_tm(struct pt_regs *regs)
1849{
1850 /* See the comments in fp_unavailable_tm(). This function operates
1851 * the same way.
1852 */
1853
1854 TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
1855 "MSR=%lx\n",
1856 regs->nip, regs->msr);
d31626f7 1857 tm_reclaim_current(TM_CAUSE_FAC_UNAV);
a7771176 1858 current->thread.load_vec = 1;
eb5c3f1c 1859 tm_recheckpoint(&current->thread);
f54db641
MN
1860 current->thread.used_vr = 1;
1861}
f54db641 1862
f54db641
MN
1863void vsx_unavailable_tm(struct pt_regs *regs)
1864{
1865 /* See the comments in fp_unavailable_tm(). This works similarly,
1866 * though we're loading both FP and VEC registers in here.
1867 *
1868 * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC
1869 * regs. Either way, set MSR_VSX.
1870 */
1871
1872 TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
1873 "MSR=%lx\n",
1874 regs->nip, regs->msr);
1875
3ac8ff1c
PM
1876 current->thread.used_vsr = 1;
1877
f54db641 1878 /* This reclaims FP and/or VR regs if they're already enabled */
d31626f7 1879 tm_reclaim_current(TM_CAUSE_FAC_UNAV);
f54db641 1880
a7771176
CB
1881 current->thread.load_vec = 1;
1882 current->thread.load_fp = 1;
3ac8ff1c 1883
eb5c3f1c 1884 tm_recheckpoint(&current->thread);
f54db641 1885}
f54db641
MN
1886#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1887
dc1c1ca3
SR
1888void performance_monitor_exception(struct pt_regs *regs)
1889{
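	/* Per-CPU PMU interrupt count, reported as "Performance monitoring
	 * interrupts" in /proc/interrupts. */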
69111bac 1890 __this_cpu_inc(irq_stat.pmu_irqs);
89713ed1 1891
dc1c1ca3
SR
1892 perf_irq(regs);
1893}
dc1c1ca3 1894
172ae2e7 1895#ifdef CONFIG_PPC_ADV_DEBUG_REGS
3bffb652
DK
1896static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
1897{
1898 int changed = 0;
1899 /*
1900 * Determine the cause of the debug event, clear the
 1901	 * event flags and send a trap to the handler.
1902 */
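	/*
	 * DAC1/DAC2 are the BookE data address compare registers and IAC1-4
	 * the instruction address compares; the final argument to
	 * do_send_trap() (1-6) just tells the handler which one fired.
	 */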
1903 if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
1904 dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
1905#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
51ae8d4a 1906 current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
3bffb652 1907#endif
47355040 1908 do_send_trap(regs, mfspr(SPRN_DAC1), debug_status,
3bffb652
DK
1909 5);
1910 changed |= 0x01;
1911 } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
1912 dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
47355040 1913 do_send_trap(regs, mfspr(SPRN_DAC2), debug_status,
3bffb652
DK
1914 6);
1915 changed |= 0x01;
1916 } else if (debug_status & DBSR_IAC1) {
51ae8d4a 1917 current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
3bffb652 1918 dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
47355040 1919 do_send_trap(regs, mfspr(SPRN_IAC1), debug_status,
3bffb652
DK
1920 1);
1921 changed |= 0x01;
1922 } else if (debug_status & DBSR_IAC2) {
51ae8d4a 1923 current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
47355040 1924 do_send_trap(regs, mfspr(SPRN_IAC2), debug_status,
3bffb652
DK
1925 2);
1926 changed |= 0x01;
1927 } else if (debug_status & DBSR_IAC3) {
51ae8d4a 1928 current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
3bffb652 1929 dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
47355040 1930 do_send_trap(regs, mfspr(SPRN_IAC3), debug_status,
3bffb652
DK
1931 3);
1932 changed |= 0x01;
1933 } else if (debug_status & DBSR_IAC4) {
51ae8d4a 1934 current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
47355040 1935 do_send_trap(regs, mfspr(SPRN_IAC4), debug_status,
3bffb652
DK
1936 4);
1937 changed |= 0x01;
1938 }
1939 /*
1940 * At the point this routine was called, the MSR(DE) was turned off.
1941 * Check all other debug flags and see if that bit needs to be turned
1942 * back on or not.
1943 */
51ae8d4a 1944 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
95791988 1945 current->thread.debug.dbcr1))
3bffb652
DK
1946 regs->msr |= MSR_DE;
1947 else
1948 /* Make sure the IDM flag is off */
51ae8d4a 1949 current->thread.debug.dbcr0 &= ~DBCR0_IDM;
3bffb652
DK
1950
1951 if (changed & 0x01)
51ae8d4a 1952 mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
3bffb652 1953}
14cf11af 1954
03465f89 1955void DebugException(struct pt_regs *regs, unsigned long debug_status)
14cf11af 1956{
51ae8d4a 1957 current->thread.debug.dbsr = debug_status;
3bffb652 1958
ec097c84
RM
1959 /* Hack alert: On BookE, Branch Taken stops on the branch itself, while
1960 * on server, it stops on the target of the branch. In order to simulate
1961 * the server behaviour, we thus restart right away with a single step
1962 * instead of stopping here when hitting a BT
1963 */
1964 if (debug_status & DBSR_BT) {
1965 regs->msr &= ~MSR_DE;
1966
1967 /* Disable BT */
1968 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
1969 /* Clear the BT event */
1970 mtspr(SPRN_DBSR, DBSR_BT);
1971
1972 /* Do the single step trick only when coming from userspace */
1973 if (user_mode(regs)) {
51ae8d4a
BB
1974 current->thread.debug.dbcr0 &= ~DBCR0_BT;
1975 current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
ec097c84
RM
1976 regs->msr |= MSR_DE;
1977 return;
1978 }
1979
6cc89bad
NR
1980 if (kprobe_post_handler(regs))
1981 return;
1982
ec097c84
RM
1983 if (notify_die(DIE_SSTEP, "block_step", regs, 5,
1984 5, SIGTRAP) == NOTIFY_STOP) {
1985 return;
1986 }
1987 if (debugger_sstep(regs))
1988 return;
1989 } else if (debug_status & DBSR_IC) { /* Instruction complete */
14cf11af 1990 regs->msr &= ~MSR_DE;
f8279621
KG
1991
1992 /* Disable instruction completion */
1993 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
1994 /* Clear the instruction completion event */
1995 mtspr(SPRN_DBSR, DBSR_IC);
1996
6cc89bad
NR
1997 if (kprobe_post_handler(regs))
1998 return;
1999
f8279621
KG
2000 if (notify_die(DIE_SSTEP, "single_step", regs, 5,
2001 5, SIGTRAP) == NOTIFY_STOP) {
2002 return;
2003 }
2004
2005 if (debugger_sstep(regs))
2006 return;
2007
d6a61bfc 2008 if (user_mode(regs)) {
51ae8d4a
BB
2009 current->thread.debug.dbcr0 &= ~DBCR0_IC;
2010 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
2011 current->thread.debug.dbcr1))
3bffb652
DK
2012 regs->msr |= MSR_DE;
2013 else
2014 /* Make sure the IDM bit is off */
51ae8d4a 2015 current->thread.debug.dbcr0 &= ~DBCR0_IDM;
d6a61bfc 2016 }
3bffb652
DK
2017
2018 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
2019 } else
2020 handle_debug(regs, debug_status);
14cf11af 2021}
03465f89 2022NOKPROBE_SYMBOL(DebugException);
172ae2e7 2023#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
14cf11af
PM
2024
2025#if !defined(CONFIG_TAU_INT)
2026void TAUException(struct pt_regs *regs)
2027{
2028 printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
2029 regs->nip, regs->msr, regs->trap, print_tainted());
2030}
 2031#endif /* CONFIG_TAU_INT */
14cf11af
PM
2032
2033#ifdef CONFIG_ALTIVEC
dc1c1ca3 2034void altivec_assist_exception(struct pt_regs *regs)
14cf11af
PM
2035{
2036 int err;
2037
14cf11af
PM
2038 if (!user_mode(regs)) {
2039 printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
2040 " at %lx\n", regs->nip);
8dad3f92 2041 die("Kernel VMX/Altivec assist exception", regs, SIGILL);
14cf11af
PM
2042 }
2043
dc1c1ca3 2044 flush_altivec_to_thread(current);
dc1c1ca3 2045
eecff81d 2046 PPC_WARN_EMULATED(altivec, regs);
14cf11af
PM
2047 err = emulate_altivec(regs);
2048 if (err == 0) {
2049 regs->nip += 4; /* skip emulated instruction */
2050 emulate_single_step(regs);
2051 return;
2052 }
2053
2054 if (err == -EFAULT) {
2055 /* got an error reading the instruction */
2056 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
2057 } else {
2058 /* didn't recognize the instruction */
2059 /* XXX quick hack for now: set the non-Java bit in the VSCR */
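		/* (NJ makes AltiVec flush denormals to zero, so the same data
		 * does not keep raising assist exceptions) */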
76462232
CD
2060 printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
2061 "in %s at %lx\n", current->comm, regs->nip);
de79f7b9 2062 current->thread.vr_state.vscr.u[3] |= 0x10000;
14cf11af
PM
2063 }
2064}
2065#endif /* CONFIG_ALTIVEC */
2066
14cf11af
PM
2067#ifdef CONFIG_FSL_BOOKE
2068void CacheLockingException(struct pt_regs *regs, unsigned long address,
2069 unsigned long error_code)
2070{
 2071	/* We treat cache locking instructions from the user
 2072	 * as privileged ops; in the future we could try to do
 2073	 * something smarter.
 2074	 */
2075 if (error_code & (ESR_DLK|ESR_ILK))
2076 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
2077 return;
2078}
2079#endif /* CONFIG_FSL_BOOKE */
2080
2081#ifdef CONFIG_SPE
2082void SPEFloatingPointException(struct pt_regs *regs)
2083{
6a800f36 2084 extern int do_spe_mathemu(struct pt_regs *regs);
14cf11af
PM
2085 unsigned long spefscr;
2086 int fpexc_mode;
aeb1c0f6 2087 int code = FPE_FLTUNK;
6a800f36
LY
2088 int err;
2089
685659ee 2090 flush_spe_to_thread(current);
14cf11af
PM
2091
2092 spefscr = current->thread.spefscr;
2093 fpexc_mode = current->thread.fpexc_mode;
2094
14cf11af
PM
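	/*
	 * Translate the SPEFSCR status bits into a siginfo FP exception code,
	 * but only for exceptions the task asked to see via its
	 * prctl(PR_SET_FPEXC) mode; everything else stays FPE_FLTUNK.
	 */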
2095 if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
2096 code = FPE_FLTOVF;
14cf11af
PM
2097 }
2098 else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
2099 code = FPE_FLTUND;
14cf11af
PM
2100 }
2101 else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
2102 code = FPE_FLTDIV;
2103 else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
2104 code = FPE_FLTINV;
14cf11af
PM
2105 }
2106 else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
2107 code = FPE_FLTRES;
2108
6a800f36
LY
2109 err = do_spe_mathemu(regs);
2110 if (err == 0) {
2111 regs->nip += 4; /* skip emulated instruction */
2112 emulate_single_step(regs);
2113 return;
2114 }
2115
2116 if (err == -EFAULT) {
2117 /* got an error reading the instruction */
2118 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
2119 } else if (err == -EINVAL) {
2120 /* didn't recognize the instruction */
2121 printk(KERN_ERR "unrecognized spe instruction "
2122 "in %s at %lx\n", current->comm, regs->nip);
2123 } else {
2124 _exception(SIGFPE, regs, code, regs->nip);
2125 }
14cf11af 2126
14cf11af
PM
2127 return;
2128}
6a800f36
LY
2129
2130void SPEFloatingPointRoundException(struct pt_regs *regs)
2131{
2132 extern int speround_handler(struct pt_regs *regs);
2133 int err;
2134
2135 preempt_disable();
2136 if (regs->msr & MSR_SPE)
2137 giveup_spe(current);
2138 preempt_enable();
2139
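	/*
	 * The round exception is raised after the offending instruction has
	 * completed, so back NIP up to point at it for the emulator; on
	 * success it is stepped over again below.
	 */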
2140 regs->nip -= 4;
2141 err = speround_handler(regs);
2142 if (err == 0) {
2143 regs->nip += 4; /* skip emulated instruction */
2144 emulate_single_step(regs);
2145 return;
2146 }
2147
2148 if (err == -EFAULT) {
2149 /* got an error reading the instruction */
2150 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
2151 } else if (err == -EINVAL) {
2152 /* didn't recognize the instruction */
2153 printk(KERN_ERR "unrecognized spe instruction "
2154 "in %s at %lx\n", current->comm, regs->nip);
2155 } else {
aeb1c0f6 2156 _exception(SIGFPE, regs, FPE_FLTUNK, regs->nip);
6a800f36
LY
2157 return;
2158 }
2159}
14cf11af
PM
2160#endif
2161
dc1c1ca3
SR
2162/*
2163 * We enter here if we get an unrecoverable exception, that is, one
2164 * that happened at a point where the RI (recoverable interrupt) bit
2165 * in the MSR is 0. This indicates that SRR0/1 are live, and that
2166 * we therefore lost state by taking this exception.
2167 */
2168void unrecoverable_exception(struct pt_regs *regs)
2169{
51423a9c
CL
2170 pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
2171 regs->trap, regs->nip, regs->msr);
dc1c1ca3
SR
2172 die("Unrecoverable exception", regs, SIGABRT);
2173}
15770a13 2174NOKPROBE_SYMBOL(unrecoverable_exception);
dc1c1ca3 2175
1e18c17a 2176#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
14cf11af
PM
2177/*
 2178 * Default handler for a Watchdog exception; the weak implementation
 2179 * below just disables further watchdog interrupts (clears TCR[WIE]).
2180 */
2181void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
2182{
2183 /* Generic WatchdogHandler, implement your own */
2184 mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
2185 return;
2186}
2187
2188void WatchdogException(struct pt_regs *regs)
2189{
2190 printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
2191 WatchdogHandler(regs);
2192}
2193#endif
dc1c1ca3 2194
dc1c1ca3
SR
2195/*
2196 * We enter here if we discover during exception entry that we are
2197 * running in supervisor mode with a userspace value in the stack pointer.
2198 */
2199void kernel_bad_stack(struct pt_regs *regs)
2200{
2201 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
2202 regs->gpr[1], regs->nip);
2203 die("Bad kernel stack pointer", regs, SIGABRT);
2204}
15770a13 2205NOKPROBE_SYMBOL(kernel_bad_stack);
14cf11af
PM
2206
2207void __init trap_init(void)
2208{
2209}
80947e7c
GU
2210
2211
2212#ifdef CONFIG_PPC_EMULATED_STATS
2213
2214#define WARN_EMULATED_SETUP(type) .type = { .name = #type }
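/*
 * Each entry expands to a designated initializer, e.g.
 *	WARN_EMULATED_SETUP(altivec)  =>  .altivec = { .name = "altivec" }
 * so the per-instruction counter can be exported by name through debugfs.
 */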
2215
2216struct ppc_emulated ppc_emulated = {
2217#ifdef CONFIG_ALTIVEC
2218 WARN_EMULATED_SETUP(altivec),
2219#endif
2220 WARN_EMULATED_SETUP(dcba),
2221 WARN_EMULATED_SETUP(dcbz),
2222 WARN_EMULATED_SETUP(fp_pair),
2223 WARN_EMULATED_SETUP(isel),
2224 WARN_EMULATED_SETUP(mcrxr),
2225 WARN_EMULATED_SETUP(mfpvr),
2226 WARN_EMULATED_SETUP(multiple),
2227 WARN_EMULATED_SETUP(popcntb),
2228 WARN_EMULATED_SETUP(spe),
2229 WARN_EMULATED_SETUP(string),
a3821b2a 2230 WARN_EMULATED_SETUP(sync),
80947e7c
GU
2231 WARN_EMULATED_SETUP(unaligned),
2232#ifdef CONFIG_MATH_EMULATION
2233 WARN_EMULATED_SETUP(math),
80947e7c
GU
2234#endif
2235#ifdef CONFIG_VSX
2236 WARN_EMULATED_SETUP(vsx),
2237#endif
efcac658
AK
2238#ifdef CONFIG_PPC64
2239 WARN_EMULATED_SETUP(mfdscr),
2240 WARN_EMULATED_SETUP(mtdscr),
f83319d7 2241 WARN_EMULATED_SETUP(lq_stq),
5080332c
MN
2242 WARN_EMULATED_SETUP(lxvw4x),
2243 WARN_EMULATED_SETUP(lxvh8x),
2244 WARN_EMULATED_SETUP(lxvd2x),
2245 WARN_EMULATED_SETUP(lxvb16x),
efcac658 2246#endif
80947e7c
GU
2247};
2248
2249u32 ppc_warn_emulated;
2250
2251void ppc_warn_emulated_print(const char *type)
2252{
76462232
CD
2253 pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
2254 type);
80947e7c
GU
2255}
2256
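/*
 * Expose the counters via debugfs, typically under
 * /sys/kernel/debug/powerpc/emulated_instructions/: one u32 file per
 * emulated instruction class plus "do_warn" to switch the rate-limited
 * warning above on or off.
 */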
2257static int __init ppc_warn_emulated_init(void)
2258{
2259 struct dentry *dir, *d;
2260 unsigned int i;
2261 struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
2262
2263 if (!powerpc_debugfs_root)
2264 return -ENODEV;
2265
2266 dir = debugfs_create_dir("emulated_instructions",
2267 powerpc_debugfs_root);
2268 if (!dir)
2269 return -ENOMEM;
2270
57ad583f 2271 d = debugfs_create_u32("do_warn", 0644, dir,
80947e7c
GU
2272 &ppc_warn_emulated);
2273 if (!d)
2274 goto fail;
2275
2276 for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
57ad583f 2277 d = debugfs_create_u32(entries[i].name, 0644, dir,
80947e7c
GU
2278 (u32 *)&entries[i].val.counter);
2279 if (!d)
2280 goto fail;
2281 }
2282
2283 return 0;
2284
2285fail:
2286 debugfs_remove_recursive(dir);
2287 return -ENOMEM;
2288}
2289
2290device_initcall(ppc_warn_emulated_init);
2291
2292#endif /* CONFIG_PPC_EMULATED_STATS */