/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/uasm.h>

extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern u32 handle_tlbl[];
extern u32 handle_tlbs[];
extern u32 handle_tlbm[];
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);

void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void (*board_cache_error_setup)(void);

static void show_raw_backtrace(unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (!task)
		task = current;

	if (raw_show_trace || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	printk("\n");
}

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	printk("Stack :");
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
			printk("\n ");
		if (i > 39) {
			printk(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
			break;
		}

		printk(" %0*lx", field, stackdata);
		i++;
	}
	printk("\n");
	show_backtrace(task, regs);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
		} else if (atomic_read(&kgdb_active) != -1 &&
			   kdb_current_regs) {
			memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif /* CONFIG_KGDB_KDB */
		} else {
			prepare_frametrace(&regs);
		}
	}
	show_stacktrace(task, &regs);
}

static void show_code(unsigned int __user *pc)
{
	long i;
	unsigned short __user *pc16 = NULL;

	printk("\nCode:");

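	/* A set bit 0 in EPC indicates MIPS16e/microMIPS code, 16-bit units. */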
	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for(i = -3 ; i < 6 ; i++) {
		unsigned int insn;
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
			break;
		}
		printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
	}
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
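		/*
		 * $0 is hardwired to zero and $26/$27 (k0/k1) are kernel
		 * scratch registers, so print placeholders for them.
		 */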
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			printk(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");
		else
			printk(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx    : %0*lx\n", field, regs->acx);
#endif
	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("    %s\n", print_tainted());
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x	", (uint32_t) regs->cp0_status);

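	/*
	 * R3000-style CPUs keep a three-deep KU/IE mode stack in Status;
	 * R4000-style CPUs use the KSU/ERL/EXL/IE fields instead.
	 */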
	if (cpu_has_3kex) {
		if (regs->cp0_status & ST0_KUO)
			printk("KUo ");
		if (regs->cp0_status & ST0_IEO)
			printk("IEo ");
		if (regs->cp0_status & ST0_KUP)
			printk("KUp ");
		if (regs->cp0_status & ST0_IEP)
			printk("IEp ");
		if (regs->cp0_status & ST0_KUC)
			printk("KUc ");
		if (regs->cp0_status & ST0_IEC)
			printk("IEc ");
	} else if (cpu_has_4kex) {
		if (regs->cp0_status & ST0_KX)
			printk("KX ");
		if (regs->cp0_status & ST0_SX)
			printk("SX ");
		if (regs->cp0_status & ST0_UX)
			printk("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			printk("USER ");
			break;
		case KSU_SUPERVISOR:
			printk("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			printk("KERNEL ");
			break;
		default:
			printk("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			printk("ERL ");
		if (regs->cp0_status & ST0_EXL)
			printk("EXL ");
		if (regs->cp0_status & ST0_IE)
			printk("IE ");
	}
	printk("\n");

	printk("Cause : %08x\n", cause);

	cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	if (1 <= cause && cause <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}

/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
}

void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	mm_segment_t old_fs = get_fs();

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	if (!user_mode(regs))
		/* Necessary for getting the correct stack content */
		set_fs(KERNEL_DS);
	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
	set_fs(old_fs);
}

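/*
 * The ExcCode field occupies bits 6:2 of the CP0 Cause register,
 * hence the shift by 2 and the 5-bit mask below.
 */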
static int regs_to_trapnr(struct pt_regs *regs)
{
	return (regs->cp0_cause >> 2) & 0x1f;
}

static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs),
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds");
		ssleep(5);
		panic("Fatal exception");
	}

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	do_exit(sig);
}

extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}

asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/* XXX For now.	 Fixme, this searches the wrong table ...  */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);

	switch (action) {
	case MIPS_BE_DISCARD:
		goto out;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			goto out;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs),
		       SIGBUS) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);

out:
	exception_exit(prev_state);
}

/*
 * ll/sc, rdhwr, sync emulation
 */

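/*
 * Field masks for decoding I-type MIPS instructions: the major opcode
 * lives in bits 31:26, the base register in 25:21, rt in 20:16 and the
 * signed offset in 15:0.
 */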
#define OPCODE	0xfc000000
#define BASE	0x03e00000
#define RT	0x001f0000
#define OFFSET	0x0000ffff
#define LL	0xc0000000
#define SC	0xe0000000
#define SPEC0	0x00000000
#define SPEC3	0x7c000000
#define RD	0x0000f800
#define FUNC	0x0000003f
#define SYNC	0x0000000f
#define RDHWR	0x0000003b

/* microMIPS definitions */
#define MM_POOL32A_FUNC	0xfc00ffff
#define MM_RDHWR	0x00006b3c
#define MM_RS		0x001f0000
#define MM_RT		0x03e00000

/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned int ll_bit;
struct task_struct *ll_task;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and put the referenced address into vaddr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}

static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	/*
	 * analyse the sc instruction that just caused a ri exception
	 * and put the referenced address into vaddr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

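	/*
	 * The emulated SC succeeds only if this task still holds an
	 * unbroken LL reservation; otherwise rt is set to 0 (failure).
	 */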
	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	regs->regs[reg] = 1;

	return 0;
}

/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors. That's the theory. In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == LL) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_ll(regs, opcode);
	}
	if ((opcode & OPCODE) == SC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_sc(regs, opcode);
	}

	return -1;			/* Must be something else ... */
}

/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case 0:		/* CPU number */
		regs->regs[rt] = smp_processor_id();
		return 0;
	case 1:		/* SYNCI length */
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case 2:		/* Read count register */
		regs->regs[rt] = read_c0_count();
		return 0;
	case 3:		/* Count register resolution */
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			regs->regs[rt] = 1;
			break;
		default:
			regs->regs[rt] = 2;
		}
		return 0;
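	/* Hardware register 29 is UserLocal, the user-mode TLS pointer. */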
	case 29:
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		return -1;
	}
}

static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
{
	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
		int rd = (opcode & MM_RS) >> 16;
		int rt = (opcode & MM_RT) >> 21;
		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return 0;
	}

	return -1;			/* Must be something else ... */
}

asmlinkage void do_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	siginfo_t info;

	prev_state = exception_enter();
	die_if_kernel("Integer overflow", regs);

	info.si_code = FPE_INTOVF;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
	exception_exit(prev_state);
}

int process_fpemu_return(int sig, void __user *fault_addr)
{
	if (sig == SIGSEGV || sig == SIGBUS) {
		struct siginfo si = {0};
		si.si_addr = fault_addr;
		si.si_signo = sig;
		if (sig == SIGSEGV) {
			down_read(&current->mm->mmap_sem);
			if (find_vma(current->mm, (unsigned long)fault_addr))
				si.si_code = SEGV_ACCERR;
			else
				si.si_code = SEGV_MAPERR;
			up_read(&current->mm->mmap_sem);
		} else {
			si.si_code = BUS_ADRERR;
		}
		force_sig_info(sig, &si, current);
		return 1;
	} else if (sig) {
		force_sig(sig, current);
		return 1;
	} else {
		return 0;
	}
}

/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	enum ctx_state prev_state;
	siginfo_t info = {0};

	prev_state = exception_enter();
	if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
		       SIGFPE) == NOTIFY_STOP)
		goto out;
	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		int sig;
		void __user *fault_addr = NULL;

		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Ensure 'resume' does not overwrite the saved fp context again. */
		lose_fpu(1);

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);

		/*
		 * We can't allow the emulated instruction to leave any of
		 * the cause bits set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again.	 */

		/* If something went wrong, signal */
		process_fpemu_return(sig, fault_addr);

		goto out;
	} else if (fcr31 & FPU_CSR_INV_X)
		info.si_code = FPE_FLTINV;
	else if (fcr31 & FPU_CSR_DIV_X)
		info.si_code = FPE_FLTDIV;
	else if (fcr31 & FPU_CSR_OVF_X)
		info.si_code = FPE_FLTOVF;
	else if (fcr31 & FPU_CSR_UDF_X)
		info.si_code = FPE_FLTUND;
	else if (fcr31 & FPU_CSR_INE_X)
		info.si_code = FPE_FLTRES;
	else
		info.si_code = __SI_FAULT;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);

out:
	exception_exit(prev_state);
}

static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
	const char *str)
{
	siginfo_t info;
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs),
		       SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures.  Weird ...
	 * But should we continue the brokenness???  --macro
	 */
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (code == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP, current);
		break;
	case BRK_MEMU:
		/*
		 * Address errors may be deliberately induced by the FPU
		 * emulator to retake control of the CPU after executing the
		 * instruction in the delay slot of an emulated branch.
		 *
		 * Terminate if the exception was recognized as a delay slot
		 * return; otherwise handle as normal.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP, current);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		force_sig(SIGTRAP, current);
	}
}

asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned int opcode, bcode;
	enum ctx_state prev_state;
	unsigned long epc;
	u16 instr[2];
	mm_segment_t seg;

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	prev_state = exception_enter();
	if (get_isa16_mode(regs->cp0_epc)) {
		/* Calculate EPC. */
		epc = exception_epc(regs);
		if (cpu_has_mmips) {
			if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
			    (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
				goto out_sigsegv;
			opcode = (instr[0] << 16) | instr[1];
		} else {
			/* MIPS16e mode */
			if (__get_user(instr[0],
				       (u16 __user *)msk_isa16_mode(epc)))
				goto out_sigsegv;
			bcode = (instr[0] >> 6) & 0x3f;
			do_trap_or_bp(regs, bcode, "Break");
			goto out;
		}
	} else {
		if (__get_user(opcode,
			       (unsigned int __user *) exception_epc(regs)))
			goto out_sigsegv;
	}

	/*
	 * There is an ancient bug in the MIPS assemblers that the break
	 * code starts at bit 16 instead of bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic.  --macro
	 */
	bcode = ((opcode >> 6) & ((1 << 20) - 1));
	if (bcode >= (1 << 10))
		bcode >>= 10;

	/*
	 * notify the kprobe handlers, if instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		break;
	}

	do_trap_or_bp(regs, bcode, "Break");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	u16 instr[2];
	mm_segment_t seg;
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(get_ds());

	prev_state = exception_enter();
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
		    __get_user(instr[1], (u16 __user *)(epc + 2)))
			goto out_sigsegv;
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_user(opcode, (u32 __user *)epc))
			goto out_sigsegv;
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}

	do_trap_or_bp(regs, tcode, "Trap");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned long old31 = regs->regs[31];
	enum ctx_state prev_state;
	unsigned int opcode = 0;
	int status = -1;

	prev_state = exception_enter();
	if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))
		goto out;

	if (get_isa16_mode(regs->cp0_epc)) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], epc) < 0))
			status = SIGSEGV;
		if (unlikely(get_user(mmop[1], epc) < 0))
			status = SIGSEGV;
		opcode = (mmop[0] << 16) | mmop[1];

		if (status < 0)
			status = simulate_rdhwr_mm(regs, opcode);
	} else {
		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr_normal(regs, opcode);

		if (status < 0)
			status = simulate_sync(regs, opcode);
	}

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
		regs->regs[31] = old31;
		force_sig(status, current);
	}

out:
	exception_exit(prev_state);
}

/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads.  If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
			cpumask_t tmask;

			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpus_and(tmask, current->cpus_allowed,
				 mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}

/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
	force_sig(SIGILL, current);

	return NOTIFY_OK;
}

static int enable_restore_fp_context(int msa)
{
	int err, was_fpu_owner;

	if (!used_math()) {
		/* First time FP context user. */
		err = init_fpu();
		if (msa && !err)
			enable_msa();
		if (!err)
			set_used_math();
		return err;
	}

	/*
	 * This task has formerly used the FP context.
	 *
	 * If this thread has no live MSA vector context then we can simply
	 * restore the scalar FP context. If it has live MSA vector context
	 * (that is, it has or may have used MSA since last performing a
	 * function call) then we'll need to restore the vector context. This
	 * applies even if we're currently only executing a scalar FP
	 * instruction. This is because if we were to later execute an MSA
	 * instruction then we'd either have to:
	 *
	 *  - Restore the vector context & clobber any registers modified by
	 *    scalar FP instructions between now & then.
	 *
	 * or
	 *
	 *  - Not restore the vector context & lose the most significant bits
	 *    of all vector registers.
	 *
	 * Neither of those options is acceptable. We cannot restore the least
	 * significant bits of the registers now & only restore the most
	 * significant bits later because the most significant bits of any
	 * vector registers whose aliased FP register is modified now will have
	 * been zeroed. We'd have no way to know that when restoring the vector
	 * context & thus may load an outdated value for the most significant
	 * bits of a vector register.
	 */
	if (!msa && !thread_msa_context_live())
		return own_fpu(1);

	/*
	 * This task is using or has previously used MSA. Thus we require
	 * that Status.FR == 1.
	 */
	was_fpu_owner = is_fpu_owner();
	err = own_fpu(0);
	if (err)
		return err;

	enable_msa();
	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time that the task is using MSA and it has
	 * previously used scalar FP in this time slice then we already have
	 * FP context which we shouldn't clobber.
	 */
	if (!test_and_set_thread_flag(TIF_MSA_CTX_LIVE) && was_fpu_owner)
		return 0;

	/* We need to restore the vector context. */
	restore_msa(current);
	return 0;
}

asmlinkage void do_cpu(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *epc;
	unsigned long old_epc, old31;
	unsigned int opcode;
	unsigned int cpid;
	int status, err;
	unsigned long __maybe_unused flags;

	prev_state = exception_enter();
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	if (cpid != 2)
		die_if_kernel("do_cpu invoked from kernel context!", regs);

	switch (cpid) {
	case 0:
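		/*
		 * Coprocessor 0 is unusable from user mode; try the
		 * rdhwr/ll/sc emulations before falling back to a signal.
		 */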
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		old31 = regs->regs[31];
		opcode = 0;
		status = -1;

		if (unlikely(compute_return_epc(regs) < 0))
			goto out;

		if (get_isa16_mode(regs->cp0_epc)) {
			unsigned short mmop[2] = { 0 };

			if (unlikely(get_user(mmop[0], epc) < 0))
				status = SIGSEGV;
			if (unlikely(get_user(mmop[1], epc) < 0))
				status = SIGSEGV;
			opcode = (mmop[0] << 16) | mmop[1];

			if (status < 0)
				status = simulate_rdhwr_mm(regs, opcode);
		} else {
			if (unlikely(get_user(opcode, epc) < 0))
				status = SIGSEGV;

			if (!cpu_has_llsc && status < 0)
				status = simulate_llsc(regs, opcode);

			if (status < 0)
				status = simulate_rdhwr_normal(regs, opcode);
		}

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
			regs->regs[31] = old31;
			force_sig(status, current);
		}

		goto out;

	case 3:
		/*
		 * Old (MIPS I and MIPS II) processors will set this code
		 * for COP1X opcode instructions that replaced the original
		 * COP3 space.	We don't limit COP1 space instructions in
		 * the emulator according to the CPU ISA, so we want to
		 * treat COP1X instructions consistently regardless of which
		 * code the CPU chose.	Therefore we redirect this trap to
		 * the FP emulator too.
		 *
		 * Then some newer FPU-less processors use this code
		 * erroneously too, so they are covered by this choice
		 * as well.
		 */
		if (raw_cpu_has_fpu)
			break;
		/* Fall through.  */

	case 1:
		err = enable_restore_fp_context(0);

		if (!raw_cpu_has_fpu || err) {
			int sig;
			void __user *fault_addr = NULL;
			sig = fpu_emulator_cop1Handler(regs,
						       &current->thread.fpu,
						       0, &fault_addr);
			if (!process_fpemu_return(sig, fault_addr) && !err)
				mt_ase_fp_affinity();
		}

		goto out;

	case 2:
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
		goto out;
	}

	force_sig(SIGILL, current);

out:
	exception_exit(prev_state);
}

asmlinkage void do_msa_fpe(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE, current);
	exception_exit(prev_state);
}

asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	int err;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL, current);
		goto out;
	}

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
	if (err)
		force_sig(SIGILL, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	force_sig(SIGILL, current);
	exception_exit(prev_state);
}

/*
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	u32 cause;

	prev_state = exception_enter();
	/*
	 * Clear WP (bit 22) bit of cause register so we don't loop
	 * forever.
	 */
	cause = read_c0_cause();
	cause &= ~(1 << 22);
	write_c0_cause(cause);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		local_irq_enable();
		force_sig(SIGTRAP, current);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
	exception_exit(prev_state);
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	show_regs(regs);

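	/* Status.TS (TLB shutdown) is set when multiple TLB entries match. */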
	if (multi_match) {
		printk("Index	: %0x\n", read_c0_index());
		printk("Pagemask: %0x\n", read_c0_pagemask());
		printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
		printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
		printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
		printk("\n");
		dump_tlb_all();
	}

	show_code((unsigned int __user *) regs->cp0_epc);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
		break;
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}


asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception");

	force_sig(SIGILL, current);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.	 Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);

/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
		{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000
			unsigned long errctl;
			unsigned int l1parity_present, l2parity_present;

			errctl = read_c0_ecc();
			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

			/* probe L1 parity support */
			write_c0_ecc(errctl | ERRCTL_PE);
			back_to_back_c0_hazard();
			l1parity_present = (read_c0_ecc() & ERRCTL_PE);

			/* probe L2 parity support */
			write_c0_ecc(errctl|ERRCTL_L2P);
			back_to_back_c0_hazard();
			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

			if (l1parity_present && l2parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
				if (l1parity ^ l2parity)
					errctl |= ERRCTL_L2P;
			} else if (l1parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
			} else if (l2parity_present) {
				if (l2parity)
					errctl |= ERRCTL_L2P;
			} else {
				/* No parity available */
			}

			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

			write_c0_ecc(errctl);
			back_to_back_c0_hazard();
			errctl = read_c0_ecc();
			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

			if (l1parity_present)
				printk(KERN_INFO "Cache parity protection %sabled\n",
				       (errctl & ERRCTL_PE) ? "en" : "dis");

			if (l2parity_present) {
				if (l1parity_present && l1parity)
					errctl ^= ERRCTL_L2P;
				printk(KERN_INFO "L2 cache parity protection %sabled\n",
				       (errctl & ERRCTL_L2P) ? "en" : "dis");
			}
		}
		break;

	case CPU_5KC:
	case CPU_5KE:
	case CPU_LOONGSON1:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}

asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if (cpu_has_mips_r2 &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}

asmlinkage void do_ftlb(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if (cpu_has_mips_r2 &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}
	/* Just print the cacheerr bits for now */
	cache_parity_error();
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}

/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}

void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	char str[100];

	raw_notifier_call_chain(&nmi_chain, 0, regs);
	bust_spinlocks(1);
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
}

#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address. All other handlers are on an odd address and
	 * require no modification. Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions. That
	 * would be bad...since we must stay in microMIPS mode.
	 */
	if (!(handler & 0x1))
		handler |= 1;
#endif
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
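		/*
		 * A j instruction can only reach a target within the same
		 * 256MB segment, so fall back to loading the handler address
		 * into k0 and using jr when it is out of range.
		 */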
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			uasm_i_j(&buf, handler & ~jump_mask);
			uasm_i_nop(&buf);
		} else {
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
			uasm_i_nop(&buf);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
}

static void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}

static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u16 *h;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = handler;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and standard interrupt exit
		 */
		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = using_rollback_handler() ?
			&rollback_except_vec_vi : &except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
		const int handler_len = &except_vec_vi_end - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicing won't help as the console
			 * is probably not configured :(
			 */
			panic("VECTORSPACING too small");
		}

		set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
				(handler_len - 1));
#else
				handler_len);
#endif
		h = (u16 *)(b + lui_offset);
		*h = (handler >> 16) & 0xffff;
		h = (u16 *)(b + ori_offset);
		*h = (handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	}
	else {
		/*
		 * In other cases jump directly to the interrupt handler. It
		 * is the handler's responsibility to save registers if required
		 * (eg hi/lo) and return from the exception using "eret".
		 */
		u32 insn;

		h = (u16 *)b;
		/* j handler */
#ifdef CONFIG_CPU_MICROMIPS
		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
		h[0] = (insn >> 16) & 0xffff;
		h[1] = insn & 0xffff;
		h[2] = 0;
		h[3] = 0;
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
}

ef300e42 1812void *set_vi_handler(int n, vi_handler_t addr)
e01402b1 1813{
ff3eab2a 1814 return set_vi_srs_handler(n, addr, 0);
e01402b1 1815}
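/*
 * Sketch of typical use from platform IRQ setup code; the vector number
 * "MY_VEC" and handler "my_dispatch" are hypothetical:
 *
 *	void *old = set_vi_handler(MY_VEC, my_dispatch);
 */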
f41ae0b2 1816
1da177e4
LT
1817extern void tlb_init(void);
1818
42f77542
RB
1819/*
1820 * Timer interrupt
1821 */
1822int cp0_compare_irq;
68b6352c 1823EXPORT_SYMBOL_GPL(cp0_compare_irq);
010c108d 1824int cp0_compare_irq_shift;
42f77542
RB
1825
1826/*
1827 * Performance counter IRQ or -1 if shared with timer
1828 */
1829int cp0_perfcount_irq;
1830EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
1831
078a55fc 1832static int noulri;
bdc94eb4
CD
1833
1834static int __init ulri_disable(char *s)
1835{
1836 pr_info("Disabling ulri\n");
1837 noulri = 1;
1838
1839 return 1;
1840}
1841__setup("noulri", ulri_disable);
1842
ae4ce454
JH
1843/* configure STATUS register */
1844static void configure_status(void)
1da177e4 1845{
1da177e4
LT
1846 /*
1847 * Disable coprocessors and select 32-bit or 64-bit addressing
1848 * and the 16/32 or 32/32 FPR register model. Reset the BEV
1849 * flag that some firmware may have left set and the TS bit (for
1850 * IP27). Set XX for ISA IV code to work.
1851 */
ae4ce454 1852 unsigned int status_set = ST0_CU0;
875d43e7 1853#ifdef CONFIG_64BIT
1da177e4
LT
1854 status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
1855#endif
adb37892 1856 if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
1da177e4 1857 status_set |= ST0_XX;
bbaf238b
CD
1858 if (cpu_has_dsp)
1859 status_set |= ST0_MX;
1860
b38c7399 1861 change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
1da177e4 1862 status_set);
ae4ce454
JH
1863}
1864
1865/* configure HWRENA register */
1866static void configure_hwrena(void)
1867{
1868 unsigned int hwrena = cpu_hwrena_impl_bits;
1da177e4 1869
18d693b3
KC
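	/* R2 cores: expose RDHWR 0-3 (CPUNum, SYNCI_Step, CC, CCRes) to user mode */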
1870 if (cpu_has_mips_r2)
1871 hwrena |= 0x0000000f;
a3692020 1872
18d693b3
KC
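	/* bit 29 (ULR) lets user code read the UserLocal (TLS) register via RDHWR */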
1873 if (!noulri && cpu_has_userlocal)
1874 hwrena |= (1 << 29);
a3692020 1875
18d693b3
KC
1876 if (hwrena)
1877 write_c0_hwrena(hwrena);
ae4ce454 1878}
e01402b1 1879
ae4ce454
JH
1880static void configure_exception_vector(void)
1881{
e01402b1 1882 if (cpu_has_veic || cpu_has_vint) {
9fb4c2b9 1883 unsigned long sr = set_c0_status(ST0_BEV);
49a89efb 1884 write_c0_ebase(ebase);
9fb4c2b9 1885 write_c0_status(sr);
e01402b1 1886 /* Setting vector spacing enables EI/VI mode */
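		/* IntCtl.VS lives in bits [9:5], hence the 0x3e0 mask */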
49a89efb 1887 change_c0_intctl(0x3e0, VECTORSPACING);
e01402b1 1888 }
d03d0a57
RB
1889 if (cpu_has_divec) {
1890 if (cpu_has_mipsmt) {
1891 unsigned int vpflags = dvpe();
1892 set_c0_cause(CAUSEF_IV);
1893 evpe(vpflags);
1894 } else
1895 set_c0_cause(CAUSEF_IV);
1896 }
ae4ce454
JH
1897}
1898
1899void per_cpu_trap_init(bool is_boot_cpu)
1900{
1901 unsigned int cpu = smp_processor_id();
ae4ce454
JH
1902
1903 configure_status();
1904 configure_hwrena();
1905
ae4ce454 1906 configure_exception_vector();
3b1d4ed5
RB
1907
1908 /*
1909 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
1910 *
1911 * o read IntCtl.IPTI to determine the timer interrupt
1912 * o read IntCtl.IPPCI to determine the performance counter interrupt
1913 */
1914 if (cpu_has_mips_r2) {
010c108d
DV
1915 cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
1916 cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
1917 cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
c3e838a2 1918 if (cp0_perfcount_irq == cp0_compare_irq)
3b1d4ed5 1919 cp0_perfcount_irq = -1;
c3e838a2
CD
1920 } else {
1921 cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
c6a4ebb9 1922 cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
c3e838a2 1923 cp0_perfcount_irq = -1;
3b1d4ed5
RB
1924 }
1925
48c4ac97
DD
1926 if (!cpu_data[cpu].asid_cache)
1927 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
1da177e4
LT
1928
1929 atomic_inc(&init_mm.mm_count);
1930 current->active_mm = &init_mm;
1931 BUG_ON(current->mm);
1932 enter_lazy_tlb(&init_mm, current);
1933
6650df3c
DD
1934 /* Boot CPU's cache setup in setup_arch(). */
1935 if (!is_boot_cpu)
1936 cpu_cache_init();
41c594ab 1937 tlb_init();
3d8bfdd0 1938 TLBMISS_HANDLER_SETUP();
1da177e4
LT
1939}
1940
e01402b1 1941/* Install CPU exception handler */
078a55fc 1942void set_handler(unsigned long offset, void *addr, unsigned long size)
e01402b1 1943{
2a0b24f5
SH
1944#ifdef CONFIG_CPU_MICROMIPS
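	/*
	 * microMIPS handler symbols carry the ISA-mode bit (bit 0); strip
	 * it to copy from the handler's true start address.
	 */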
1945 memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
1946#else
e01402b1 1947 memcpy((void *)(ebase + offset), addr, size);
2a0b24f5 1948#endif
e0cee3ee 1949 local_flush_icache_range(ebase + offset, ebase + offset + size);
e01402b1
RB
1950}
1951
078a55fc 1952static char panic_null_cerr[] =
641e97f3
RB
1953 "Trying to set NULL cache error exception handler";
1954
42fe7ee3
RB
1955/*
1956 * Install uncached CPU exception handler.
1957 * This is suitable only for the cache error exception, which is the
1958 * only exception handler that is run uncached.
1959 */
078a55fc 1960void set_uncached_handler(unsigned long offset, void *addr,
234fcd14 1961 unsigned long size)
e01402b1 1962{
4f81b01a 1963 unsigned long uncached_ebase = CKSEG1ADDR(ebase);
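	/* CKSEG1 aliases the same physical memory as ebase, but uncached */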
e01402b1 1964
641e97f3
RB
1965 if (!addr)
1966 panic(panic_null_cerr);
1967
e01402b1
RB
1968 memcpy((void *)(uncached_ebase + offset), addr, size);
1969}
1970
5b10496b
AN
1971static int __initdata rdhwr_noopt;
1972static int __init set_rdhwr_noopt(char *str)
1973{
1974 rdhwr_noopt = 1;
1975 return 1;
1976}
1977
1978__setup("rdhwr_noopt", set_rdhwr_noopt);
1979
1da177e4
LT
1980void __init trap_init(void)
1981{
2a0b24f5 1982 extern char except_vec3_generic;
1da177e4 1983 extern char except_vec4;
2a0b24f5 1984 extern char except_vec3_r4000;
1da177e4 1985 unsigned long i;
c65a5480
AN
1986
1987 check_wait();
1da177e4 1988
88547001
JW
1989#if defined(CONFIG_KGDB)
1990 if (kgdb_early_setup)
70342287 1991 return; /* Already done */
88547001
JW
1992#endif
1993
9fb4c2b9
CD
1994 if (cpu_has_veic || cpu_has_vint) {
1995 unsigned long size = 0x200 + VECTORSPACING*64;
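		/*
		 * Fixed vectors below 0x200 plus up to 64 spaced interrupt
		 * vectors; align to the next power of two.
		 */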
1996 ebase = (unsigned long)
1997 __alloc_bootmem(size, 1 << fls(size), 0);
1998 } else {
9843b030
SL
1999#ifdef CONFIG_KVM_GUEST
2000#define KVM_GUEST_KSEG0 0x40000000
2001 ebase = KVM_GUEST_KSEG0;
2002#else
2003 ebase = CKSEG0;
2004#endif
566f74f6
DD
2005 if (cpu_has_mips_r2)
2006 ebase += (read_c0_ebase() & 0x3ffff000);
2007 }
e01402b1 2008
c6213c6c
SH
2009 if (cpu_has_mmips) {
2010 unsigned int config3 = read_c0_config3();
2011
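		/*
		 * Config3.ISAOnExc selects which ISA mode (microMIPS or
		 * classic) the core enters when taking an exception.
		 */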
2012 if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
2013 write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
2014 else
2015 write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
2016 }
2017
6fb97eff
KC
2018 if (board_ebase_setup)
2019 board_ebase_setup();
6650df3c 2020 per_cpu_trap_init(true);
1da177e4
LT
2021
2022 /*
2023 * Copy the generic exception handlers to their final destination.
2024 * This will be overridden later as suitable for a particular
2025 * configuration.
2026 */
e01402b1 2027 set_handler(0x180, &except_vec3_generic, 0x80);
1da177e4
LT
2028
2029 /*
2030 * Setup default vectors
2031 */
2032 for (i = 0; i <= 31; i++)
2033 set_except_vector(i, handle_reserved);
2034
2035 /*
2036 * Copy the EJTAG debug exception vector handler code to its final
2037 * destination.
2038 */
e01402b1 2039 if (cpu_has_ejtag && board_ejtag_handler_setup)
49a89efb 2040 board_ejtag_handler_setup();
1da177e4
LT
2041
2042 /*
2043 * Only some CPUs have the watch exceptions.
2044 */
2045 if (cpu_has_watch)
2046 set_except_vector(23, handle_watch);
2047
2048 /*
e01402b1 2049 * Initialise interrupt handlers
1da177e4 2050 */
e01402b1
RB
2051 if (cpu_has_veic || cpu_has_vint) {
2052 int nvec = cpu_has_veic ? 64 : 8;
2053 for (i = 0; i < nvec; i++)
ff3eab2a 2054 set_vi_handler(i, NULL);
e01402b1
RB
2055 }
2056 else if (cpu_has_divec)
2057 set_handler(0x200, &except_vec4, 0x8);
1da177e4
LT
2058
2059 /*
2060 * Some CPUs can enable/disable cache parity detection, but they
2061 * do it in different ways.
2062 */
2063 parity_protection_init();
2064
2065 /*
2066 * The Data Bus Errors / Instruction Bus Errors are signaled
2067 * by external hardware. Therefore these two exceptions
2068 * may have board-specific handlers.
2069 */
2070 if (board_be_init)
2071 board_be_init();
2072
f94d9a8e
RB
2073 set_except_vector(0, using_rollback_handler() ? rollback_handle_int
2074 : handle_int);
1da177e4
LT
2075 set_except_vector(1, handle_tlbm);
2076 set_except_vector(2, handle_tlbl);
2077 set_except_vector(3, handle_tlbs);
2078
2079 set_except_vector(4, handle_adel);
2080 set_except_vector(5, handle_ades);
2081
2082 set_except_vector(6, handle_ibe);
2083 set_except_vector(7, handle_dbe);
2084
2085 set_except_vector(8, handle_sys);
2086 set_except_vector(9, handle_bp);
5b10496b
AN
2087 set_except_vector(10, rdhwr_noopt ? handle_ri :
2088 (cpu_has_vtag_icache ?
2089 handle_ri_rdhwr_vivt : handle_ri_rdhwr));
1da177e4
LT
2090 set_except_vector(11, handle_cpu);
2091 set_except_vector(12, handle_ov);
2092 set_except_vector(13, handle_tr);
2bcb3fbc 2093 set_except_vector(14, handle_msa_fpe);
1da177e4 2094
10cc3529
RB
2095 if (current_cpu_type() == CPU_R6000 ||
2096 current_cpu_type() == CPU_R6000A) {
1da177e4
LT
2097 /*
2098 * The R6000 is the only R-series CPU that features a machine
2099 * check exception (similar to the R4000 cache error) and
2100 * unaligned ldc1/sdc1 exception. The handlers have not been
70342287 2101 * written yet. Well, anyway there is no R6000 machine on the
1da177e4
LT
2102 * current list of targets for Linux/MIPS.
2103 * (Duh, crap, there is someone with a triple R6k machine)
2104 */
2105 //set_except_vector(14, handle_mc);
2106 //set_except_vector(15, handle_ndc);
2107 }
2108
e01402b1
RB
2109
2110 if (board_nmi_handler_setup)
2111 board_nmi_handler_setup();
2112
e50c0a8f
RB
2113 if (cpu_has_fpu && !cpu_has_nofpuex)
2114 set_except_vector(15, handle_fpe);
2115
75b5b5e0 2116 set_except_vector(16, handle_ftlb);
1db1af84 2117 set_except_vector(21, handle_msa);
e50c0a8f
RB
2118 set_except_vector(22, handle_mdmx);
2119
2120 if (cpu_has_mcheck)
2121 set_except_vector(24, handle_mcheck);
2122
340ee4b9
RB
2123 if (cpu_has_mipsmt)
2124 set_except_vector(25, handle_mt);
2125
acaec427 2126 set_except_vector(26, handle_dsp);
e50c0a8f 2127
fcbf1dfd
DD
2128 if (board_cache_error_setup)
2129 board_cache_error_setup();
2130
e50c0a8f
RB
2131 if (cpu_has_vce)
2132 /* Special exception: R4[04]00 uses also the divec space. */
2a0b24f5 2133 set_handler(0x180, &except_vec3_r4000, 0x100);
e50c0a8f 2134 else if (cpu_has_4kex)
2a0b24f5 2135 set_handler(0x180, &except_vec3_generic, 0x80);
e50c0a8f 2136 else
2a0b24f5 2137 set_handler(0x080, &except_vec3_generic, 0x80);
e50c0a8f 2138
e0cee3ee 2139 local_flush_icache_range(ebase, ebase + 0x400);
0510617b
TB
2140
2141 sort_extable(__start___dbe_table, __stop___dbe_table);
69f3a7de 2142
4483b159 2143 cu2_notifier(default_cu2_call, 0x80000000); /* Run last */
1da177e4 2144}
ae4ce454
JH
2145
2146static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
2147 void *v)
2148{
2149 switch (cmd) {
2150 case CPU_PM_ENTER_FAILED:
2151 case CPU_PM_EXIT:
2152 configure_status();
2153 configure_hwrena();
2154 configure_exception_vector();
2155
2156 /* Restore register with CPU number for TLB handlers */
2157 TLBMISS_HANDLER_RESTORE();
2158
2159 break;
2160 }
2161
2162 return NOTIFY_OK;
2163}
2164
2165static struct notifier_block trap_pm_notifier_block = {
2166 .notifier_call = trap_pm_notifier,
2167};
2168
2169static int __init trap_pm_init(void)
2170{
2171 return cpu_pm_register_notifier(&trap_pm_notifier_block);
2172}
2173arch_initcall(trap_pm_init);