1 /* arch/sparc64/kernel/traps.c
3 * Copyright (C) 1995,1997,2008,2009 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
8 * I like traps on v9, :))))
11 #include <linux/module.h>
12 #include <linux/sched.h>
13 #include <linux/linkage.h>
14 #include <linux/kernel.h>
15 #include <linux/signal.h>
16 #include <linux/smp.h>
18 #include <linux/init.h>
19 #include <linux/kdebug.h>
20 #include <linux/gfp.h>
23 #include <asm/delay.h>
24 #include <asm/system.h>
25 #include <asm/ptrace.h>
26 #include <asm/oplib.h>
28 #include <asm/pgtable.h>
29 #include <asm/unistd.h>
30 #include <asm/uaccess.h>
31 #include <asm/fpumacro.h>
34 #include <asm/estate.h>
35 #include <asm/chafsr.h>
36 #include <asm/sfafsr.h>
37 #include <asm/psrcompat.h>
38 #include <asm/processor.h>
39 #include <asm/timer.h>
42 #include <asm/memctrl.h>
47 /* When an irrecoverable trap occurs at tl > 0, the trap entry
48 * code logs the trap state registers at every level in the trap
49 * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout
62 static void dump_tl1_traplog(struct tl1_traplog
*p
)
66 printk(KERN_EMERG
"TRAPLOG: Error at trap level 0x%lx, "
67 "dumping track stack.\n", p
->tl
);
69 limit
= (tlb_type
== hypervisor
) ? 2 : 4;
70 for (i
= 0; i
< limit
; i
++) {
72 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
73 "TNPC[%016lx] TT[%lx]\n",
75 p
->trapstack
[i
].tstate
, p
->trapstack
[i
].tpc
,
76 p
->trapstack
[i
].tnpc
, p
->trapstack
[i
].tt
);
77 printk("TRAPLOG: TPC<%pS>\n", (void *) p
->trapstack
[i
].tpc
);
81 void bad_trap(struct pt_regs
*regs
, long lvl
)
86 if (notify_die(DIE_TRAP
, "bad trap", regs
,
87 0, lvl
, SIGTRAP
) == NOTIFY_STOP
)
91 sprintf(buffer
, "Bad hw trap %lx at tl0\n", lvl
);
92 die_if_kernel(buffer
, regs
);
96 if (regs
->tstate
& TSTATE_PRIV
) {
97 sprintf(buffer
, "Kernel bad sw trap %lx", lvl
);
98 die_if_kernel(buffer
, regs
);
100 if (test_thread_flag(TIF_32BIT
)) {
101 regs
->tpc
&= 0xffffffff;
102 regs
->tnpc
&= 0xffffffff;
104 info
.si_signo
= SIGILL
;
106 info
.si_code
= ILL_ILLTRP
;
107 info
.si_addr
= (void __user
*)regs
->tpc
;
108 info
.si_trapno
= lvl
;
109 force_sig_info(SIGILL
, &info
, current
);
112 void bad_trap_tl1(struct pt_regs
*regs
, long lvl
)
116 if (notify_die(DIE_TRAP_TL1
, "bad trap tl1", regs
,
117 0, lvl
, SIGTRAP
) == NOTIFY_STOP
)
120 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
122 sprintf (buffer
, "Bad trap %lx at tl>0", lvl
);
123 die_if_kernel (buffer
, regs
);
126 #ifdef CONFIG_DEBUG_BUGVERBOSE
/* Verbose BUG() reporting: identify the source file and line of the
 * failed assertion before the trap machinery kills the kernel.
 */
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
EXPORT_SYMBOL(do_BUG);
135 static DEFINE_SPINLOCK(dimm_handler_lock
);
136 static dimm_printer_t dimm_handler
;
138 static int sprintf_dimm(int synd_code
, unsigned long paddr
, char *buf
, int buflen
)
143 spin_lock_irqsave(&dimm_handler_lock
, flags
);
145 ret
= dimm_handler(synd_code
, paddr
, buf
, buflen
);
146 } else if (tlb_type
== spitfire
) {
147 if (prom_getunumber(synd_code
, paddr
, buf
, buflen
) == -1)
153 spin_unlock_irqrestore(&dimm_handler_lock
, flags
);
158 int register_dimm_printer(dimm_printer_t func
)
163 spin_lock_irqsave(&dimm_handler_lock
, flags
);
168 spin_unlock_irqrestore(&dimm_handler_lock
, flags
);
172 EXPORT_SYMBOL_GPL(register_dimm_printer
);
174 void unregister_dimm_printer(dimm_printer_t func
)
178 spin_lock_irqsave(&dimm_handler_lock
, flags
);
179 if (dimm_handler
== func
)
181 spin_unlock_irqrestore(&dimm_handler_lock
, flags
);
183 EXPORT_SYMBOL_GPL(unregister_dimm_printer
);
185 void spitfire_insn_access_exception(struct pt_regs
*regs
, unsigned long sfsr
, unsigned long sfar
)
189 if (notify_die(DIE_TRAP
, "instruction access exception", regs
,
190 0, 0x8, SIGTRAP
) == NOTIFY_STOP
)
193 if (regs
->tstate
& TSTATE_PRIV
) {
194 printk("spitfire_insn_access_exception: SFSR[%016lx] "
195 "SFAR[%016lx], going.\n", sfsr
, sfar
);
196 die_if_kernel("Iax", regs
);
198 if (test_thread_flag(TIF_32BIT
)) {
199 regs
->tpc
&= 0xffffffff;
200 regs
->tnpc
&= 0xffffffff;
202 info
.si_signo
= SIGSEGV
;
204 info
.si_code
= SEGV_MAPERR
;
205 info
.si_addr
= (void __user
*)regs
->tpc
;
207 force_sig_info(SIGSEGV
, &info
, current
);
210 void spitfire_insn_access_exception_tl1(struct pt_regs
*regs
, unsigned long sfsr
, unsigned long sfar
)
212 if (notify_die(DIE_TRAP_TL1
, "instruction access exception tl1", regs
,
213 0, 0x8, SIGTRAP
) == NOTIFY_STOP
)
216 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
217 spitfire_insn_access_exception(regs
, sfsr
, sfar
);
220 void sun4v_insn_access_exception(struct pt_regs
*regs
, unsigned long addr
, unsigned long type_ctx
)
222 unsigned short type
= (type_ctx
>> 16);
223 unsigned short ctx
= (type_ctx
& 0xffff);
226 if (notify_die(DIE_TRAP
, "instruction access exception", regs
,
227 0, 0x8, SIGTRAP
) == NOTIFY_STOP
)
230 if (regs
->tstate
& TSTATE_PRIV
) {
231 printk("sun4v_insn_access_exception: ADDR[%016lx] "
232 "CTX[%04x] TYPE[%04x], going.\n",
234 die_if_kernel("Iax", regs
);
237 if (test_thread_flag(TIF_32BIT
)) {
238 regs
->tpc
&= 0xffffffff;
239 regs
->tnpc
&= 0xffffffff;
241 info
.si_signo
= SIGSEGV
;
243 info
.si_code
= SEGV_MAPERR
;
244 info
.si_addr
= (void __user
*) addr
;
246 force_sig_info(SIGSEGV
, &info
, current
);
249 void sun4v_insn_access_exception_tl1(struct pt_regs
*regs
, unsigned long addr
, unsigned long type_ctx
)
251 if (notify_die(DIE_TRAP_TL1
, "instruction access exception tl1", regs
,
252 0, 0x8, SIGTRAP
) == NOTIFY_STOP
)
255 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
256 sun4v_insn_access_exception(regs
, addr
, type_ctx
);
259 void spitfire_data_access_exception(struct pt_regs
*regs
, unsigned long sfsr
, unsigned long sfar
)
263 if (notify_die(DIE_TRAP
, "data access exception", regs
,
264 0, 0x30, SIGTRAP
) == NOTIFY_STOP
)
267 if (regs
->tstate
& TSTATE_PRIV
) {
268 /* Test if this comes from uaccess places. */
269 const struct exception_table_entry
*entry
;
271 entry
= search_exception_tables(regs
->tpc
);
273 /* Ouch, somebody is trying VM hole tricks on us... */
274 #ifdef DEBUG_EXCEPTIONS
275 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs
->tpc
);
276 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
277 regs
->tpc
, entry
->fixup
);
279 regs
->tpc
= entry
->fixup
;
280 regs
->tnpc
= regs
->tpc
+ 4;
284 printk("spitfire_data_access_exception: SFSR[%016lx] "
285 "SFAR[%016lx], going.\n", sfsr
, sfar
);
286 die_if_kernel("Dax", regs
);
289 info
.si_signo
= SIGSEGV
;
291 info
.si_code
= SEGV_MAPERR
;
292 info
.si_addr
= (void __user
*)sfar
;
294 force_sig_info(SIGSEGV
, &info
, current
);
297 void spitfire_data_access_exception_tl1(struct pt_regs
*regs
, unsigned long sfsr
, unsigned long sfar
)
299 if (notify_die(DIE_TRAP_TL1
, "data access exception tl1", regs
,
300 0, 0x30, SIGTRAP
) == NOTIFY_STOP
)
303 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
304 spitfire_data_access_exception(regs
, sfsr
, sfar
);
307 void sun4v_data_access_exception(struct pt_regs
*regs
, unsigned long addr
, unsigned long type_ctx
)
309 unsigned short type
= (type_ctx
>> 16);
310 unsigned short ctx
= (type_ctx
& 0xffff);
313 if (notify_die(DIE_TRAP
, "data access exception", regs
,
314 0, 0x8, SIGTRAP
) == NOTIFY_STOP
)
317 if (regs
->tstate
& TSTATE_PRIV
) {
318 /* Test if this comes from uaccess places. */
319 const struct exception_table_entry
*entry
;
321 entry
= search_exception_tables(regs
->tpc
);
323 /* Ouch, somebody is trying VM hole tricks on us... */
324 #ifdef DEBUG_EXCEPTIONS
325 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs
->tpc
);
326 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
327 regs
->tpc
, entry
->fixup
);
329 regs
->tpc
= entry
->fixup
;
330 regs
->tnpc
= regs
->tpc
+ 4;
333 printk("sun4v_data_access_exception: ADDR[%016lx] "
334 "CTX[%04x] TYPE[%04x], going.\n",
336 die_if_kernel("Dax", regs
);
339 if (test_thread_flag(TIF_32BIT
)) {
340 regs
->tpc
&= 0xffffffff;
341 regs
->tnpc
&= 0xffffffff;
343 info
.si_signo
= SIGSEGV
;
345 info
.si_code
= SEGV_MAPERR
;
346 info
.si_addr
= (void __user
*) addr
;
348 force_sig_info(SIGSEGV
, &info
, current
);
351 void sun4v_data_access_exception_tl1(struct pt_regs
*regs
, unsigned long addr
, unsigned long type_ctx
)
353 if (notify_die(DIE_TRAP_TL1
, "data access exception tl1", regs
,
354 0, 0x8, SIGTRAP
) == NOTIFY_STOP
)
357 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
358 sun4v_data_access_exception(regs
, addr
, type_ctx
);
362 #include "pci_impl.h"
365 /* When access exceptions happen, we must do this. */
366 static void spitfire_clean_and_reenable_l1_caches(void)
370 if (tlb_type
!= spitfire
)
374 for (va
= 0; va
< (PAGE_SIZE
<< 1); va
+= 32) {
375 spitfire_put_icache_tag(va
, 0x0);
376 spitfire_put_dcache_tag(va
, 0x0);
379 /* Re-enable in LSU. */
380 __asm__
__volatile__("flush %%g6\n\t"
382 "stxa %0, [%%g0] %1\n\t"
385 : "r" (LSU_CONTROL_IC
| LSU_CONTROL_DC
|
386 LSU_CONTROL_IM
| LSU_CONTROL_DM
),
387 "i" (ASI_LSU_CONTROL
)
391 static void spitfire_enable_estate_errors(void)
393 __asm__
__volatile__("stxa %0, [%%g0] %1\n\t"
396 : "r" (ESTATE_ERR_ALL
),
397 "i" (ASI_ESTATE_ERROR_EN
));
/* Map an 8-bit UDB ECC syndrome to a syndrome code consumed by
 * sprintf_dimm()/prom_getunumber().  Indexed by the raw syndrome byte.
 */
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
435 static char *syndrome_unknown
= "<Unknown>";
437 static void spitfire_log_udb_syndrome(unsigned long afar
, unsigned long udbh
, unsigned long udbl
, unsigned long bit
)
439 unsigned short scode
;
440 char memmod_str
[64], *p
;
443 scode
= ecc_syndrome_table
[udbl
& 0xff];
444 if (sprintf_dimm(scode
, afar
, memmod_str
, sizeof(memmod_str
)) < 0)
445 p
= syndrome_unknown
;
448 printk(KERN_WARNING
"CPU[%d]: UDBL Syndrome[%x] "
449 "Memory Module \"%s\"\n",
450 smp_processor_id(), scode
, p
);
454 scode
= ecc_syndrome_table
[udbh
& 0xff];
455 if (sprintf_dimm(scode
, afar
, memmod_str
, sizeof(memmod_str
)) < 0)
456 p
= syndrome_unknown
;
459 printk(KERN_WARNING
"CPU[%d]: UDBH Syndrome[%x] "
460 "Memory Module \"%s\"\n",
461 smp_processor_id(), scode
, p
);
466 static void spitfire_cee_log(unsigned long afsr
, unsigned long afar
, unsigned long udbh
, unsigned long udbl
, int tl1
, struct pt_regs
*regs
)
469 printk(KERN_WARNING
"CPU[%d]: Correctable ECC Error "
470 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
471 smp_processor_id(), afsr
, afar
, udbl
, udbh
, tl1
);
473 spitfire_log_udb_syndrome(afar
, udbh
, udbl
, UDBE_CE
);
475 /* We always log it, even if someone is listening for this
478 notify_die(DIE_TRAP
, "Correctable ECC Error", regs
,
479 0, TRAP_TYPE_CEE
, SIGTRAP
);
481 /* The Correctable ECC Error trap does not disable I/D caches. So
482 * we only have to restore the ESTATE Error Enable register.
484 spitfire_enable_estate_errors();
487 static void spitfire_ue_log(unsigned long afsr
, unsigned long afar
, unsigned long udbh
, unsigned long udbl
, unsigned long tt
, int tl1
, struct pt_regs
*regs
)
491 printk(KERN_WARNING
"CPU[%d]: Uncorrectable Error AFSR[%lx] "
492 "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
493 smp_processor_id(), afsr
, afar
, udbl
, udbh
, tt
, tl1
);
495 /* XXX add more human friendly logging of the error status
496 * XXX as is implemented for cheetah
499 spitfire_log_udb_syndrome(afar
, udbh
, udbl
, UDBE_UE
);
501 /* We always log it, even if someone is listening for this
504 notify_die(DIE_TRAP
, "Uncorrectable Error", regs
,
507 if (regs
->tstate
& TSTATE_PRIV
) {
509 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
510 die_if_kernel("UE", regs
);
513 /* XXX need more intelligent processing here, such as is implemented
514 * XXX for cheetah errors, in fact if the E-cache still holds the
515 * XXX line with bad parity this will loop
518 spitfire_clean_and_reenable_l1_caches();
519 spitfire_enable_estate_errors();
521 if (test_thread_flag(TIF_32BIT
)) {
522 regs
->tpc
&= 0xffffffff;
523 regs
->tnpc
&= 0xffffffff;
525 info
.si_signo
= SIGBUS
;
527 info
.si_code
= BUS_OBJERR
;
528 info
.si_addr
= (void *)0;
530 force_sig_info(SIGBUS
, &info
, current
);
533 void spitfire_access_error(struct pt_regs
*regs
, unsigned long status_encoded
, unsigned long afar
)
535 unsigned long afsr
, tt
, udbh
, udbl
;
538 afsr
= (status_encoded
& SFSTAT_AFSR_MASK
) >> SFSTAT_AFSR_SHIFT
;
539 tt
= (status_encoded
& SFSTAT_TRAP_TYPE
) >> SFSTAT_TRAP_TYPE_SHIFT
;
540 tl1
= (status_encoded
& SFSTAT_TL_GT_ONE
) ? 1 : 0;
541 udbl
= (status_encoded
& SFSTAT_UDBL_MASK
) >> SFSTAT_UDBL_SHIFT
;
542 udbh
= (status_encoded
& SFSTAT_UDBH_MASK
) >> SFSTAT_UDBH_SHIFT
;
545 if (tt
== TRAP_TYPE_DAE
&&
546 pci_poke_in_progress
&& pci_poke_cpu
== smp_processor_id()) {
547 spitfire_clean_and_reenable_l1_caches();
548 spitfire_enable_estate_errors();
550 pci_poke_faulted
= 1;
551 regs
->tnpc
= regs
->tpc
+ 4;
556 if (afsr
& SFAFSR_UE
)
557 spitfire_ue_log(afsr
, afar
, udbh
, udbl
, tt
, tl1
, regs
);
559 if (tt
== TRAP_TYPE_CEE
) {
560 /* Handle the case where we took a CEE trap, but ACK'd
561 * only the UE state in the UDB error registers.
563 if (afsr
& SFAFSR_UE
) {
564 if (udbh
& UDBE_CE
) {
565 __asm__
__volatile__(
566 "stxa %0, [%1] %2\n\t"
569 : "r" (udbh
& UDBE_CE
),
570 "r" (0x0), "i" (ASI_UDB_ERROR_W
));
572 if (udbl
& UDBE_CE
) {
573 __asm__
__volatile__(
574 "stxa %0, [%1] %2\n\t"
577 : "r" (udbl
& UDBE_CE
),
578 "r" (0x18), "i" (ASI_UDB_ERROR_W
));
582 spitfire_cee_log(afsr
, afar
, udbh
, udbl
, tl1
, regs
);
586 int cheetah_pcache_forced_on
;
588 void cheetah_enable_pcache(void)
592 printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
595 __asm__
__volatile__("ldxa [%%g0] %1, %0"
597 : "i" (ASI_DCU_CONTROL_REG
));
598 dcr
|= (DCU_PE
| DCU_HPE
| DCU_SPE
| DCU_SL
);
599 __asm__
__volatile__("stxa %0, [%%g0] %1\n\t"
602 : "r" (dcr
), "i" (ASI_DCU_CONTROL_REG
));
605 /* Cheetah error trap handling. */
606 static unsigned long ecache_flush_physbase
;
607 static unsigned long ecache_flush_linesize
;
608 static unsigned long ecache_flush_size
;
/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */
struct afsr_error_table {
	unsigned long mask;	/* AFSR bit(s) identifying the error */
	const char *name;	/* human readable description */
};
619 static const char CHAFSR_PERR_msg
[] =
620 "System interface protocol error";
621 static const char CHAFSR_IERR_msg
[] =
622 "Internal processor error";
623 static const char CHAFSR_ISAP_msg
[] =
624 "System request parity error on incoming addresss";
625 static const char CHAFSR_UCU_msg
[] =
626 "Uncorrectable E-cache ECC error for ifetch/data";
627 static const char CHAFSR_UCC_msg
[] =
628 "SW Correctable E-cache ECC error for ifetch/data";
629 static const char CHAFSR_UE_msg
[] =
630 "Uncorrectable system bus data ECC error for read";
631 static const char CHAFSR_EDU_msg
[] =
632 "Uncorrectable E-cache ECC error for stmerge/blkld";
633 static const char CHAFSR_EMU_msg
[] =
634 "Uncorrectable system bus MTAG error";
635 static const char CHAFSR_WDU_msg
[] =
636 "Uncorrectable E-cache ECC error for writeback";
637 static const char CHAFSR_CPU_msg
[] =
638 "Uncorrectable ECC error for copyout";
639 static const char CHAFSR_CE_msg
[] =
640 "HW corrected system bus data ECC error for read";
641 static const char CHAFSR_EDC_msg
[] =
642 "HW corrected E-cache ECC error for stmerge/blkld";
643 static const char CHAFSR_EMC_msg
[] =
644 "HW corrected system bus MTAG ECC error";
645 static const char CHAFSR_WDC_msg
[] =
646 "HW corrected E-cache ECC error for writeback";
647 static const char CHAFSR_CPC_msg
[] =
648 "HW corrected ECC error for copyout";
649 static const char CHAFSR_TO_msg
[] =
650 "Unmapped error from system bus";
651 static const char CHAFSR_BERR_msg
[] =
652 "Bus error response from system bus";
653 static const char CHAFSR_IVC_msg
[] =
654 "HW corrected system bus data ECC error for ivec read";
655 static const char CHAFSR_IVU_msg
[] =
656 "Uncorrectable system bus data ECC error for ivec read";
657 static struct afsr_error_table __cheetah_error_table
[] = {
658 { CHAFSR_PERR
, CHAFSR_PERR_msg
},
659 { CHAFSR_IERR
, CHAFSR_IERR_msg
},
660 { CHAFSR_ISAP
, CHAFSR_ISAP_msg
},
661 { CHAFSR_UCU
, CHAFSR_UCU_msg
},
662 { CHAFSR_UCC
, CHAFSR_UCC_msg
},
663 { CHAFSR_UE
, CHAFSR_UE_msg
},
664 { CHAFSR_EDU
, CHAFSR_EDU_msg
},
665 { CHAFSR_EMU
, CHAFSR_EMU_msg
},
666 { CHAFSR_WDU
, CHAFSR_WDU_msg
},
667 { CHAFSR_CPU
, CHAFSR_CPU_msg
},
668 { CHAFSR_CE
, CHAFSR_CE_msg
},
669 { CHAFSR_EDC
, CHAFSR_EDC_msg
},
670 { CHAFSR_EMC
, CHAFSR_EMC_msg
},
671 { CHAFSR_WDC
, CHAFSR_WDC_msg
},
672 { CHAFSR_CPC
, CHAFSR_CPC_msg
},
673 { CHAFSR_TO
, CHAFSR_TO_msg
},
674 { CHAFSR_BERR
, CHAFSR_BERR_msg
},
675 /* These two do not update the AFAR. */
676 { CHAFSR_IVC
, CHAFSR_IVC_msg
},
677 { CHAFSR_IVU
, CHAFSR_IVU_msg
},
680 static const char CHPAFSR_DTO_msg
[] =
681 "System bus unmapped error for prefetch/storequeue-read";
682 static const char CHPAFSR_DBERR_msg
[] =
683 "System bus error for prefetch/storequeue-read";
684 static const char CHPAFSR_THCE_msg
[] =
685 "Hardware corrected E-cache Tag ECC error";
686 static const char CHPAFSR_TSCE_msg
[] =
687 "SW handled correctable E-cache Tag ECC error";
688 static const char CHPAFSR_TUE_msg
[] =
689 "Uncorrectable E-cache Tag ECC error";
690 static const char CHPAFSR_DUE_msg
[] =
691 "System bus uncorrectable data ECC error due to prefetch/store-fill";
692 static struct afsr_error_table __cheetah_plus_error_table
[] = {
693 { CHAFSR_PERR
, CHAFSR_PERR_msg
},
694 { CHAFSR_IERR
, CHAFSR_IERR_msg
},
695 { CHAFSR_ISAP
, CHAFSR_ISAP_msg
},
696 { CHAFSR_UCU
, CHAFSR_UCU_msg
},
697 { CHAFSR_UCC
, CHAFSR_UCC_msg
},
698 { CHAFSR_UE
, CHAFSR_UE_msg
},
699 { CHAFSR_EDU
, CHAFSR_EDU_msg
},
700 { CHAFSR_EMU
, CHAFSR_EMU_msg
},
701 { CHAFSR_WDU
, CHAFSR_WDU_msg
},
702 { CHAFSR_CPU
, CHAFSR_CPU_msg
},
703 { CHAFSR_CE
, CHAFSR_CE_msg
},
704 { CHAFSR_EDC
, CHAFSR_EDC_msg
},
705 { CHAFSR_EMC
, CHAFSR_EMC_msg
},
706 { CHAFSR_WDC
, CHAFSR_WDC_msg
},
707 { CHAFSR_CPC
, CHAFSR_CPC_msg
},
708 { CHAFSR_TO
, CHAFSR_TO_msg
},
709 { CHAFSR_BERR
, CHAFSR_BERR_msg
},
710 { CHPAFSR_DTO
, CHPAFSR_DTO_msg
},
711 { CHPAFSR_DBERR
, CHPAFSR_DBERR_msg
},
712 { CHPAFSR_THCE
, CHPAFSR_THCE_msg
},
713 { CHPAFSR_TSCE
, CHPAFSR_TSCE_msg
},
714 { CHPAFSR_TUE
, CHPAFSR_TUE_msg
},
715 { CHPAFSR_DUE
, CHPAFSR_DUE_msg
},
716 /* These two do not update the AFAR. */
717 { CHAFSR_IVC
, CHAFSR_IVC_msg
},
718 { CHAFSR_IVU
, CHAFSR_IVU_msg
},
721 static const char JPAFSR_JETO_msg
[] =
722 "System interface protocol error, hw timeout caused";
723 static const char JPAFSR_SCE_msg
[] =
724 "Parity error on system snoop results";
725 static const char JPAFSR_JEIC_msg
[] =
726 "System interface protocol error, illegal command detected";
727 static const char JPAFSR_JEIT_msg
[] =
728 "System interface protocol error, illegal ADTYPE detected";
729 static const char JPAFSR_OM_msg
[] =
730 "Out of range memory error has occurred";
731 static const char JPAFSR_ETP_msg
[] =
732 "Parity error on L2 cache tag SRAM";
733 static const char JPAFSR_UMS_msg
[] =
734 "Error due to unsupported store";
735 static const char JPAFSR_RUE_msg
[] =
736 "Uncorrectable ECC error from remote cache/memory";
737 static const char JPAFSR_RCE_msg
[] =
738 "Correctable ECC error from remote cache/memory";
739 static const char JPAFSR_BP_msg
[] =
740 "JBUS parity error on returned read data";
741 static const char JPAFSR_WBP_msg
[] =
742 "JBUS parity error on data for writeback or block store";
743 static const char JPAFSR_FRC_msg
[] =
744 "Foreign read to DRAM incurring correctable ECC error";
745 static const char JPAFSR_FRU_msg
[] =
746 "Foreign read to DRAM incurring uncorrectable ECC error";
747 static struct afsr_error_table __jalapeno_error_table
[] = {
748 { JPAFSR_JETO
, JPAFSR_JETO_msg
},
749 { JPAFSR_SCE
, JPAFSR_SCE_msg
},
750 { JPAFSR_JEIC
, JPAFSR_JEIC_msg
},
751 { JPAFSR_JEIT
, JPAFSR_JEIT_msg
},
752 { CHAFSR_PERR
, CHAFSR_PERR_msg
},
753 { CHAFSR_IERR
, CHAFSR_IERR_msg
},
754 { CHAFSR_ISAP
, CHAFSR_ISAP_msg
},
755 { CHAFSR_UCU
, CHAFSR_UCU_msg
},
756 { CHAFSR_UCC
, CHAFSR_UCC_msg
},
757 { CHAFSR_UE
, CHAFSR_UE_msg
},
758 { CHAFSR_EDU
, CHAFSR_EDU_msg
},
759 { JPAFSR_OM
, JPAFSR_OM_msg
},
760 { CHAFSR_WDU
, CHAFSR_WDU_msg
},
761 { CHAFSR_CPU
, CHAFSR_CPU_msg
},
762 { CHAFSR_CE
, CHAFSR_CE_msg
},
763 { CHAFSR_EDC
, CHAFSR_EDC_msg
},
764 { JPAFSR_ETP
, JPAFSR_ETP_msg
},
765 { CHAFSR_WDC
, CHAFSR_WDC_msg
},
766 { CHAFSR_CPC
, CHAFSR_CPC_msg
},
767 { CHAFSR_TO
, CHAFSR_TO_msg
},
768 { CHAFSR_BERR
, CHAFSR_BERR_msg
},
769 { JPAFSR_UMS
, JPAFSR_UMS_msg
},
770 { JPAFSR_RUE
, JPAFSR_RUE_msg
},
771 { JPAFSR_RCE
, JPAFSR_RCE_msg
},
772 { JPAFSR_BP
, JPAFSR_BP_msg
},
773 { JPAFSR_WBP
, JPAFSR_WBP_msg
},
774 { JPAFSR_FRC
, JPAFSR_FRC_msg
},
775 { JPAFSR_FRU
, JPAFSR_FRU_msg
},
776 /* These two do not update the AFAR. */
777 { CHAFSR_IVU
, CHAFSR_IVU_msg
},
780 static struct afsr_error_table
*cheetah_error_table
;
781 static unsigned long cheetah_afsr_errors
;
783 struct cheetah_err_info
*cheetah_error_log
;
785 static inline struct cheetah_err_info
*cheetah_get_error_log(unsigned long afsr
)
787 struct cheetah_err_info
*p
;
788 int cpu
= smp_processor_id();
790 if (!cheetah_error_log
)
793 p
= cheetah_error_log
+ (cpu
* 2);
794 if ((afsr
& CHAFSR_TL1
) != 0UL)
800 extern unsigned int tl0_icpe
[], tl1_icpe
[];
801 extern unsigned int tl0_dcpe
[], tl1_dcpe
[];
802 extern unsigned int tl0_fecc
[], tl1_fecc
[];
803 extern unsigned int tl0_cee
[], tl1_cee
[];
804 extern unsigned int tl0_iae
[], tl1_iae
[];
805 extern unsigned int tl0_dae
[], tl1_dae
[];
806 extern unsigned int cheetah_plus_icpe_trap_vector
[], cheetah_plus_icpe_trap_vector_tl1
[];
807 extern unsigned int cheetah_plus_dcpe_trap_vector
[], cheetah_plus_dcpe_trap_vector_tl1
[];
808 extern unsigned int cheetah_fecc_trap_vector
[], cheetah_fecc_trap_vector_tl1
[];
809 extern unsigned int cheetah_cee_trap_vector
[], cheetah_cee_trap_vector_tl1
[];
810 extern unsigned int cheetah_deferred_trap_vector
[], cheetah_deferred_trap_vector_tl1
[];
812 void __init
cheetah_ecache_flush_init(void)
814 unsigned long largest_size
, smallest_linesize
, order
, ver
;
817 /* Scan all cpu device tree nodes, note two values:
818 * 1) largest E-cache size
819 * 2) smallest E-cache line size
822 smallest_linesize
= ~0UL;
824 for (i
= 0; i
< NR_CPUS
; i
++) {
827 val
= cpu_data(i
).ecache_size
;
831 if (val
> largest_size
)
834 val
= cpu_data(i
).ecache_line_size
;
835 if (val
< smallest_linesize
)
836 smallest_linesize
= val
;
840 if (largest_size
== 0UL || smallest_linesize
== ~0UL) {
841 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
846 ecache_flush_size
= (2 * largest_size
);
847 ecache_flush_linesize
= smallest_linesize
;
849 ecache_flush_physbase
= find_ecache_flush_span(ecache_flush_size
);
851 if (ecache_flush_physbase
== ~0UL) {
852 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
853 "contiguous physical memory.\n",
858 /* Now allocate error trap reporting scoreboard. */
859 sz
= NR_CPUS
* (2 * sizeof(struct cheetah_err_info
));
860 for (order
= 0; order
< MAX_ORDER
; order
++) {
861 if ((PAGE_SIZE
<< order
) >= sz
)
864 cheetah_error_log
= (struct cheetah_err_info
*)
865 __get_free_pages(GFP_KERNEL
, order
);
866 if (!cheetah_error_log
) {
867 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
868 "error logging scoreboard (%d bytes).\n", sz
);
871 memset(cheetah_error_log
, 0, PAGE_SIZE
<< order
);
873 /* Mark all AFSRs as invalid so that the trap handler will
874 * log new new information there.
876 for (i
= 0; i
< 2 * NR_CPUS
; i
++)
877 cheetah_error_log
[i
].afsr
= CHAFSR_INVALID
;
879 __asm__ ("rdpr %%ver, %0" : "=r" (ver
));
880 if ((ver
>> 32) == __JALAPENO_ID
||
881 (ver
>> 32) == __SERRANO_ID
) {
882 cheetah_error_table
= &__jalapeno_error_table
[0];
883 cheetah_afsr_errors
= JPAFSR_ERRORS
;
884 } else if ((ver
>> 32) == 0x003e0015) {
885 cheetah_error_table
= &__cheetah_plus_error_table
[0];
886 cheetah_afsr_errors
= CHPAFSR_ERRORS
;
888 cheetah_error_table
= &__cheetah_error_table
[0];
889 cheetah_afsr_errors
= CHAFSR_ERRORS
;
892 /* Now patch trap tables. */
893 memcpy(tl0_fecc
, cheetah_fecc_trap_vector
, (8 * 4));
894 memcpy(tl1_fecc
, cheetah_fecc_trap_vector_tl1
, (8 * 4));
895 memcpy(tl0_cee
, cheetah_cee_trap_vector
, (8 * 4));
896 memcpy(tl1_cee
, cheetah_cee_trap_vector_tl1
, (8 * 4));
897 memcpy(tl0_iae
, cheetah_deferred_trap_vector
, (8 * 4));
898 memcpy(tl1_iae
, cheetah_deferred_trap_vector_tl1
, (8 * 4));
899 memcpy(tl0_dae
, cheetah_deferred_trap_vector
, (8 * 4));
900 memcpy(tl1_dae
, cheetah_deferred_trap_vector_tl1
, (8 * 4));
901 if (tlb_type
== cheetah_plus
) {
902 memcpy(tl0_dcpe
, cheetah_plus_dcpe_trap_vector
, (8 * 4));
903 memcpy(tl1_dcpe
, cheetah_plus_dcpe_trap_vector_tl1
, (8 * 4));
904 memcpy(tl0_icpe
, cheetah_plus_icpe_trap_vector
, (8 * 4));
905 memcpy(tl1_icpe
, cheetah_plus_icpe_trap_vector_tl1
, (8 * 4));
910 static void cheetah_flush_ecache(void)
912 unsigned long flush_base
= ecache_flush_physbase
;
913 unsigned long flush_linesize
= ecache_flush_linesize
;
914 unsigned long flush_size
= ecache_flush_size
;
916 __asm__
__volatile__("1: subcc %0, %4, %0\n\t"
917 " bne,pt %%xcc, 1b\n\t"
918 " ldxa [%2 + %0] %3, %%g0\n\t"
920 : "0" (flush_size
), "r" (flush_base
),
921 "i" (ASI_PHYS_USE_EC
), "r" (flush_linesize
));
924 static void cheetah_flush_ecache_line(unsigned long physaddr
)
928 physaddr
&= ~(8UL - 1UL);
929 physaddr
= (ecache_flush_physbase
+
930 (physaddr
& ((ecache_flush_size
>>1UL) - 1UL)));
931 alias
= physaddr
+ (ecache_flush_size
>> 1UL);
932 __asm__
__volatile__("ldxa [%0] %2, %%g0\n\t"
933 "ldxa [%1] %2, %%g0\n\t"
936 : "r" (physaddr
), "r" (alias
),
937 "i" (ASI_PHYS_USE_EC
));
940 /* Unfortunately, the diagnostic access to the I-cache tags we need to
941 * use to clear the thing interferes with I-cache coherency transactions.
943 * So we must only flush the I-cache when it is disabled.
945 static void __cheetah_flush_icache(void)
947 unsigned int icache_size
, icache_line_size
;
950 icache_size
= local_cpu_data().icache_size
;
951 icache_line_size
= local_cpu_data().icache_line_size
;
953 /* Clear the valid bits in all the tags. */
954 for (addr
= 0; addr
< icache_size
; addr
+= icache_line_size
) {
955 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
958 : "r" (addr
| (2 << 3)),
963 static void cheetah_flush_icache(void)
965 unsigned long dcu_save
;
967 /* Save current DCU, disable I-cache. */
968 __asm__
__volatile__("ldxa [%%g0] %1, %0\n\t"
969 "or %0, %2, %%g1\n\t"
970 "stxa %%g1, [%%g0] %1\n\t"
973 : "i" (ASI_DCU_CONTROL_REG
), "i" (DCU_IC
)
976 __cheetah_flush_icache();
978 /* Restore DCU register */
979 __asm__
__volatile__("stxa %0, [%%g0] %1\n\t"
982 : "r" (dcu_save
), "i" (ASI_DCU_CONTROL_REG
));
985 static void cheetah_flush_dcache(void)
987 unsigned int dcache_size
, dcache_line_size
;
990 dcache_size
= local_cpu_data().dcache_size
;
991 dcache_line_size
= local_cpu_data().dcache_line_size
;
993 for (addr
= 0; addr
< dcache_size
; addr
+= dcache_line_size
) {
994 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
997 : "r" (addr
), "i" (ASI_DCACHE_TAG
));
1001 /* In order to make the even parity correct we must do two things.
1002 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
1003 * Next, we clear out all 32-bytes of data for that line. Data of
1004 * all-zero + tag parity value of zero == correct parity.
1006 static void cheetah_plus_zap_dcache_parity(void)
1008 unsigned int dcache_size
, dcache_line_size
;
1011 dcache_size
= local_cpu_data().dcache_size
;
1012 dcache_line_size
= local_cpu_data().dcache_line_size
;
1014 for (addr
= 0; addr
< dcache_size
; addr
+= dcache_line_size
) {
1015 unsigned long tag
= (addr
>> 14);
1018 __asm__
__volatile__("membar #Sync\n\t"
1019 "stxa %0, [%1] %2\n\t"
1022 : "r" (tag
), "r" (addr
),
1023 "i" (ASI_DCACHE_UTAG
));
1024 for (line
= addr
; line
< addr
+ dcache_line_size
; line
+= 8)
1025 __asm__
__volatile__("membar #Sync\n\t"
1026 "stxa %%g0, [%0] %1\n\t"
1030 "i" (ASI_DCACHE_DATA
));
1034 /* Conversion tables used to frob Cheetah AFSR syndrome values into
1035 * something palatable to the memory controller driver get_unumber
1059 static unsigned char cheetah_ecc_syntab
[] = {
1060 /*00*/NONE
, C0
, C1
, M2
, C2
, M2
, M3
, 47, C3
, M2
, M2
, 53, M2
, 41, 29, M
,
1061 /*01*/C4
, M
, M
, 50, M2
, 38, 25, M2
, M2
, 33, 24, M2
, 11, M
, M2
, 16,
1062 /*02*/C5
, M
, M
, 46, M2
, 37, 19, M2
, M
, 31, 32, M
, 7, M2
, M2
, 10,
1063 /*03*/M2
, 40, 13, M2
, 59, M
, M2
, 66, M
, M2
, M2
, 0, M2
, 67, 71, M
,
1064 /*04*/C6
, M
, M
, 43, M
, 36, 18, M
, M2
, 49, 15, M
, 63, M2
, M2
, 6,
1065 /*05*/M2
, 44, 28, M2
, M
, M2
, M2
, 52, 68, M2
, M2
, 62, M2
, M3
, M3
, M4
,
1066 /*06*/M2
, 26, 106, M2
, 64, M
, M2
, 2, 120, M
, M2
, M3
, M
, M3
, M3
, M4
,
1067 /*07*/116, M2
, M2
, M3
, M2
, M3
, M
, M4
, M2
, 58, 54, M2
, M
, M4
, M4
, M3
,
1068 /*08*/C7
, M2
, M
, 42, M
, 35, 17, M2
, M
, 45, 14, M2
, 21, M2
, M2
, 5,
1069 /*09*/M
, 27, M
, M
, 99, M
, M
, 3, 114, M2
, M2
, 20, M2
, M3
, M3
, M
,
1070 /*0a*/M2
, 23, 113, M2
, 112, M2
, M
, 51, 95, M
, M2
, M3
, M2
, M3
, M3
, M2
,
1071 /*0b*/103, M
, M2
, M3
, M2
, M3
, M3
, M4
, M2
, 48, M
, M
, 73, M2
, M
, M3
,
1072 /*0c*/M2
, 22, 110, M2
, 109, M2
, M
, 9, 108, M2
, M
, M3
, M2
, M3
, M3
, M
,
1073 /*0d*/102, M2
, M
, M
, M2
, M3
, M3
, M
, M2
, M3
, M3
, M2
, M
, M4
, M
, M3
,
1074 /*0e*/98, M
, M2
, M3
, M2
, M
, M3
, M4
, M2
, M3
, M3
, M4
, M3
, M
, M
, M
,
1075 /*0f*/M2
, M3
, M3
, M
, M3
, M
, M
, M
, 56, M4
, M
, M3
, M4
, M
, M
, M
,
1076 /*10*/C8
, M
, M2
, 39, M
, 34, 105, M2
, M
, 30, 104, M
, 101, M
, M
, 4,
1077 /*11*/M
, M
, 100, M
, 83, M
, M2
, 12, 87, M
, M
, 57, M2
, M
, M3
, M
,
1078 /*12*/M2
, 97, 82, M2
, 78, M2
, M2
, 1, 96, M
, M
, M
, M
, M
, M3
, M2
,
1079 /*13*/94, M
, M2
, M3
, M2
, M
, M3
, M
, M2
, M
, 79, M
, 69, M
, M4
, M
,
1080 /*14*/M2
, 93, 92, M
, 91, M
, M2
, 8, 90, M2
, M2
, M
, M
, M
, M
, M4
,
1081 /*15*/89, M
, M
, M3
, M2
, M3
, M3
, M
, M
, M
, M3
, M2
, M3
, M2
, M
, M3
,
1082 /*16*/86, M
, M2
, M3
, M2
, M
, M3
, M
, M2
, M
, M3
, M
, M3
, M
, M
, M3
,
1083 /*17*/M
, M
, M3
, M2
, M3
, M2
, M4
, M
, 60, M
, M2
, M3
, M4
, M
, M
, M2
,
1084 /*18*/M2
, 88, 85, M2
, 84, M
, M2
, 55, 81, M2
, M2
, M3
, M2
, M3
, M3
, M4
,
1085 /*19*/77, M
, M
, M
, M2
, M3
, M
, M
, M2
, M3
, M3
, M4
, M3
, M2
, M
, M
,
1086 /*1a*/74, M
, M2
, M3
, M
, M
, M3
, M
, M
, M
, M3
, M
, M3
, M
, M4
, M3
,
1087 /*1b*/M2
, 70, 107, M4
, 65, M2
, M2
, M
, 127, M
, M
, M
, M2
, M3
, M3
, M
,
1088 /*1c*/80, M2
, M2
, 72, M
, 119, 118, M
, M2
, 126, 76, M
, 125, M
, M4
, M3
,
1089 /*1d*/M2
, 115, 124, M
, 75, M
, M
, M3
, 61, M
, M4
, M
, M4
, M
, M
, M
,
1090 /*1e*/M
, 123, 122, M4
, 121, M4
, M
, M3
, 117, M2
, M2
, M3
, M4
, M3
, M
, M
,
1091 /*1f*/111, M
, M
, M
, M4
, M3
, M3
, M
, M
, M
, M3
, M
, M3
, M2
, M
, M
1093 static unsigned char cheetah_mtag_syntab
[] = {
1104 /* Return the highest priority error conditon mentioned. */
1105 static inline unsigned long cheetah_get_hipri(unsigned long afsr
)
1107 unsigned long tmp
= 0;
1110 for (i
= 0; cheetah_error_table
[i
].mask
; i
++) {
1111 if ((tmp
= (afsr
& cheetah_error_table
[i
].mask
)) != 0UL)
1117 static const char *cheetah_get_string(unsigned long bit
)
1121 for (i
= 0; cheetah_error_table
[i
].mask
; i
++) {
1122 if ((bit
& cheetah_error_table
[i
].mask
) != 0UL)
1123 return cheetah_error_table
[i
].name
;
1128 static void cheetah_log_errors(struct pt_regs
*regs
, struct cheetah_err_info
*info
,
1129 unsigned long afsr
, unsigned long afar
, int recoverable
)
1131 unsigned long hipri
;
1134 printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
1135 (recoverable
? KERN_WARNING
: KERN_CRIT
), smp_processor_id(),
1137 (afsr
& CHAFSR_TL1
) ? 1 : 0);
1138 printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
1139 (recoverable
? KERN_WARNING
: KERN_CRIT
), smp_processor_id(),
1140 regs
->tpc
, regs
->tnpc
, regs
->u_regs
[UREG_I7
], regs
->tstate
);
1141 printk("%s" "ERROR(%d): ",
1142 (recoverable
? KERN_WARNING
: KERN_CRIT
), smp_processor_id());
1143 printk("TPC<%pS>\n", (void *) regs
->tpc
);
1144 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
1145 (recoverable
? KERN_WARNING
: KERN_CRIT
), smp_processor_id(),
1146 (afsr
& CHAFSR_M_SYNDROME
) >> CHAFSR_M_SYNDROME_SHIFT
,
1147 (afsr
& CHAFSR_E_SYNDROME
) >> CHAFSR_E_SYNDROME_SHIFT
,
1148 (afsr
& CHAFSR_ME
) ? ", Multiple Errors" : "",
1149 (afsr
& CHAFSR_PRIV
) ? ", Privileged" : "");
1150 hipri
= cheetah_get_hipri(afsr
);
1151 printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
1152 (recoverable
? KERN_WARNING
: KERN_CRIT
), smp_processor_id(),
1153 hipri
, cheetah_get_string(hipri
));
1155 /* Try to get unumber if relevant. */
1156 #define ESYND_ERRORS (CHAFSR_IVC | CHAFSR_IVU | \
1157 CHAFSR_CPC | CHAFSR_CPU | \
1158 CHAFSR_UE | CHAFSR_CE | \
1159 CHAFSR_EDC | CHAFSR_EDU | \
1160 CHAFSR_UCC | CHAFSR_UCU | \
1161 CHAFSR_WDU | CHAFSR_WDC)
1162 #define MSYND_ERRORS (CHAFSR_EMC | CHAFSR_EMU)
1163 if (afsr
& ESYND_ERRORS
) {
1167 syndrome
= (afsr
& CHAFSR_E_SYNDROME
) >> CHAFSR_E_SYNDROME_SHIFT
;
1168 syndrome
= cheetah_ecc_syntab
[syndrome
];
1169 ret
= sprintf_dimm(syndrome
, afar
, unum
, sizeof(unum
));
1171 printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
1172 (recoverable
? KERN_WARNING
: KERN_CRIT
),
1173 smp_processor_id(), unum
);
1174 } else if (afsr
& MSYND_ERRORS
) {
1178 syndrome
= (afsr
& CHAFSR_M_SYNDROME
) >> CHAFSR_M_SYNDROME_SHIFT
;
1179 syndrome
= cheetah_mtag_syntab
[syndrome
];
1180 ret
= sprintf_dimm(syndrome
, afar
, unum
, sizeof(unum
));
1182 printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
1183 (recoverable
? KERN_WARNING
: KERN_CRIT
),
1184 smp_processor_id(), unum
);
1187 /* Now dump the cache snapshots. */
1188 printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx]\n",
1189 (recoverable
? KERN_WARNING
: KERN_CRIT
), smp_processor_id(),
1190 (int) info
->dcache_index
,
1194 printk("%s" "ERROR(%d): D-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
1195 (recoverable
? KERN_WARNING
: KERN_CRIT
), smp_processor_id(),
1196 info
->dcache_data
[0],
1197 info
->dcache_data
[1],
1198 info
->dcache_data
[2],
1199 info
->dcache_data
[3]);
1200 printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx] "
1201 "u[%016llx] l[%016llx]\n",
1202 (recoverable
? KERN_WARNING
: KERN_CRIT
), smp_processor_id(),
1203 (int) info
->icache_index
,
1208 info
->icache_lower
);
1209 printk("%s" "ERROR(%d): I-cache INSN0[%016llx] INSN1[%016llx] INSN2[%016llx] INSN3[%016llx]\n",
1210 (recoverable
? KERN_WARNING
: KERN_CRIT
), smp_processor_id(),
1211 info
->icache_data
[0],
1212 info
->icache_data
[1],
1213 info
->icache_data
[2],
1214 info
->icache_data
[3]);
1215 printk("%s" "ERROR(%d): I-cache INSN4[%016llx] INSN5[%016llx] INSN6[%016llx] INSN7[%016llx]\n",
1216 (recoverable
? KERN_WARNING
: KERN_CRIT
), smp_processor_id(),
1217 info
->icache_data
[4],
1218 info
->icache_data
[5],
1219 info
->icache_data
[6],
1220 info
->icache_data
[7]);
1221 printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016llx]\n",
1222 (recoverable
? KERN_WARNING
: KERN_CRIT
), smp_processor_id(),
1223 (int) info
->ecache_index
, info
->ecache_tag
);
1224 printk("%s" "ERROR(%d): E-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
1225 (recoverable
? KERN_WARNING
: KERN_CRIT
), smp_processor_id(),
1226 info
->ecache_data
[0],
1227 info
->ecache_data
[1],
1228 info
->ecache_data
[2],
1229 info
->ecache_data
[3]);
1231 afsr
= (afsr
& ~hipri
) & cheetah_afsr_errors
;
1232 while (afsr
!= 0UL) {
1233 unsigned long bit
= cheetah_get_hipri(afsr
);
1235 printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
1236 (recoverable
? KERN_WARNING
: KERN_CRIT
),
1237 bit
, cheetah_get_string(bit
));
1243 printk(KERN_CRIT
"ERROR: This condition is not recoverable.\n");
1246 static int cheetah_recheck_errors(struct cheetah_err_info
*logp
)
1248 unsigned long afsr
, afar
;
1251 __asm__
__volatile__("ldxa [%%g0] %1, %0\n\t"
1254 if ((afsr
& cheetah_afsr_errors
) != 0) {
1256 __asm__
__volatile__("ldxa [%%g0] %1, %0\n\t"
1264 __asm__
__volatile__("stxa %0, [%%g0] %1\n\t"
1266 : : "r" (afsr
), "i" (ASI_AFSR
));
1271 void cheetah_fecc_handler(struct pt_regs
*regs
, unsigned long afsr
, unsigned long afar
)
1273 struct cheetah_err_info local_snapshot
, *p
;
1277 cheetah_flush_ecache();
1279 p
= cheetah_get_error_log(afsr
);
1281 prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
1283 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1284 smp_processor_id(), regs
->tpc
, regs
->tnpc
, regs
->tstate
);
1288 /* Grab snapshot of logged error. */
1289 memcpy(&local_snapshot
, p
, sizeof(local_snapshot
));
1291 /* If the current trap snapshot does not match what the
1292 * trap handler passed along into our args, big trouble.
1293 * In such a case, mark the local copy as invalid.
1295 * Else, it matches and we mark the afsr in the non-local
1296 * copy as invalid so we may log new error traps there.
1298 if (p
->afsr
!= afsr
|| p
->afar
!= afar
)
1299 local_snapshot
.afsr
= CHAFSR_INVALID
;
1301 p
->afsr
= CHAFSR_INVALID
;
1303 cheetah_flush_icache();
1304 cheetah_flush_dcache();
1306 /* Re-enable I-cache/D-cache */
1307 __asm__
__volatile__("ldxa [%%g0] %0, %%g1\n\t"
1308 "or %%g1, %1, %%g1\n\t"
1309 "stxa %%g1, [%%g0] %0\n\t"
1312 : "i" (ASI_DCU_CONTROL_REG
),
1313 "i" (DCU_DC
| DCU_IC
)
1316 /* Re-enable error reporting */
1317 __asm__
__volatile__("ldxa [%%g0] %0, %%g1\n\t"
1318 "or %%g1, %1, %%g1\n\t"
1319 "stxa %%g1, [%%g0] %0\n\t"
1322 : "i" (ASI_ESTATE_ERROR_EN
),
1323 "i" (ESTATE_ERROR_NCEEN
| ESTATE_ERROR_CEEN
)
1326 /* Decide if we can continue after handling this trap and
1327 * logging the error.
1330 if (afsr
& (CHAFSR_PERR
| CHAFSR_IERR
| CHAFSR_ISAP
))
1333 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1334 * error was logged while we had error reporting traps disabled.
1336 if (cheetah_recheck_errors(&local_snapshot
)) {
1337 unsigned long new_afsr
= local_snapshot
.afsr
;
1339 /* If we got a new asynchronous error, die... */
1340 if (new_afsr
& (CHAFSR_EMU
| CHAFSR_EDU
|
1341 CHAFSR_WDU
| CHAFSR_CPU
|
1342 CHAFSR_IVU
| CHAFSR_UE
|
1343 CHAFSR_BERR
| CHAFSR_TO
))
1348 cheetah_log_errors(regs
, &local_snapshot
, afsr
, afar
, recoverable
);
1351 panic("Irrecoverable Fast-ECC error trap.\n");
1353 /* Flush E-cache to kick the error trap handlers out. */
1354 cheetah_flush_ecache();
1357 /* Try to fix a correctable error by pushing the line out from
1358 * the E-cache. Recheck error reporting registers to see if the
1359 * problem is intermittent.
1361 static int cheetah_fix_ce(unsigned long physaddr
)
1363 unsigned long orig_estate
;
1364 unsigned long alias1
, alias2
;
1367 /* Make sure correctable error traps are disabled. */
1368 __asm__
__volatile__("ldxa [%%g0] %2, %0\n\t"
1369 "andn %0, %1, %%g1\n\t"
1370 "stxa %%g1, [%%g0] %2\n\t"
1372 : "=&r" (orig_estate
)
1373 : "i" (ESTATE_ERROR_CEEN
),
1374 "i" (ASI_ESTATE_ERROR_EN
)
1377 /* We calculate alias addresses that will force the
1378 * cache line in question out of the E-cache. Then
1379 * we bring it back in with an atomic instruction so
1380 * that we get it in some modified/exclusive state,
1381 * then we displace it again to try and get proper ECC
1382 * pushed back into the system.
1384 physaddr
&= ~(8UL - 1UL);
1385 alias1
= (ecache_flush_physbase
+
1386 (physaddr
& ((ecache_flush_size
>> 1) - 1)));
1387 alias2
= alias1
+ (ecache_flush_size
>> 1);
1388 __asm__
__volatile__("ldxa [%0] %3, %%g0\n\t"
1389 "ldxa [%1] %3, %%g0\n\t"
1390 "casxa [%2] %3, %%g0, %%g0\n\t"
1391 "ldxa [%0] %3, %%g0\n\t"
1392 "ldxa [%1] %3, %%g0\n\t"
1395 : "r" (alias1
), "r" (alias2
),
1396 "r" (physaddr
), "i" (ASI_PHYS_USE_EC
));
1398 /* Did that trigger another error? */
1399 if (cheetah_recheck_errors(NULL
)) {
1400 /* Try one more time. */
1401 __asm__
__volatile__("ldxa [%0] %1, %%g0\n\t"
1403 : : "r" (physaddr
), "i" (ASI_PHYS_USE_EC
));
1404 if (cheetah_recheck_errors(NULL
))
1409 /* No new error, intermittent problem. */
1413 /* Restore error enables. */
1414 __asm__
__volatile__("stxa %0, [%%g0] %1\n\t"
1416 : : "r" (orig_estate
), "i" (ASI_ESTATE_ERROR_EN
));
1421 /* Return non-zero if PADDR is a valid physical memory address. */
1422 static int cheetah_check_main_memory(unsigned long paddr
)
1424 unsigned long vaddr
= PAGE_OFFSET
+ paddr
;
1426 if (vaddr
> (unsigned long) high_memory
)
1429 return kern_addr_valid(vaddr
);
1432 void cheetah_cee_handler(struct pt_regs
*regs
, unsigned long afsr
, unsigned long afar
)
1434 struct cheetah_err_info local_snapshot
, *p
;
1435 int recoverable
, is_memory
;
1437 p
= cheetah_get_error_log(afsr
);
1439 prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
1441 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1442 smp_processor_id(), regs
->tpc
, regs
->tnpc
, regs
->tstate
);
1446 /* Grab snapshot of logged error. */
1447 memcpy(&local_snapshot
, p
, sizeof(local_snapshot
));
1449 /* If the current trap snapshot does not match what the
1450 * trap handler passed along into our args, big trouble.
1451 * In such a case, mark the local copy as invalid.
1453 * Else, it matches and we mark the afsr in the non-local
1454 * copy as invalid so we may log new error traps there.
1456 if (p
->afsr
!= afsr
|| p
->afar
!= afar
)
1457 local_snapshot
.afsr
= CHAFSR_INVALID
;
1459 p
->afsr
= CHAFSR_INVALID
;
1461 is_memory
= cheetah_check_main_memory(afar
);
1463 if (is_memory
&& (afsr
& CHAFSR_CE
) != 0UL) {
1464 /* XXX Might want to log the results of this operation
1465 * XXX somewhere... -DaveM
1467 cheetah_fix_ce(afar
);
1471 int flush_all
, flush_line
;
1473 flush_all
= flush_line
= 0;
1474 if ((afsr
& CHAFSR_EDC
) != 0UL) {
1475 if ((afsr
& cheetah_afsr_errors
) == CHAFSR_EDC
)
1479 } else if ((afsr
& CHAFSR_CPC
) != 0UL) {
1480 if ((afsr
& cheetah_afsr_errors
) == CHAFSR_CPC
)
1486 /* Trap handler only disabled I-cache, flush it. */
1487 cheetah_flush_icache();
1489 /* Re-enable I-cache */
1490 __asm__
__volatile__("ldxa [%%g0] %0, %%g1\n\t"
1491 "or %%g1, %1, %%g1\n\t"
1492 "stxa %%g1, [%%g0] %0\n\t"
1495 : "i" (ASI_DCU_CONTROL_REG
),
1500 cheetah_flush_ecache();
1501 else if (flush_line
)
1502 cheetah_flush_ecache_line(afar
);
1505 /* Re-enable error reporting */
1506 __asm__
__volatile__("ldxa [%%g0] %0, %%g1\n\t"
1507 "or %%g1, %1, %%g1\n\t"
1508 "stxa %%g1, [%%g0] %0\n\t"
1511 : "i" (ASI_ESTATE_ERROR_EN
),
1512 "i" (ESTATE_ERROR_CEEN
)
1515 /* Decide if we can continue after handling this trap and
1516 * logging the error.
1519 if (afsr
& (CHAFSR_PERR
| CHAFSR_IERR
| CHAFSR_ISAP
))
1522 /* Re-check AFSR/AFAR */
1523 (void) cheetah_recheck_errors(&local_snapshot
);
1526 cheetah_log_errors(regs
, &local_snapshot
, afsr
, afar
, recoverable
);
1529 panic("Irrecoverable Correctable-ECC error trap.\n");
1532 void cheetah_deferred_handler(struct pt_regs
*regs
, unsigned long afsr
, unsigned long afar
)
1534 struct cheetah_err_info local_snapshot
, *p
;
1535 int recoverable
, is_memory
;
1538 /* Check for the special PCI poke sequence. */
1539 if (pci_poke_in_progress
&& pci_poke_cpu
== smp_processor_id()) {
1540 cheetah_flush_icache();
1541 cheetah_flush_dcache();
1543 /* Re-enable I-cache/D-cache */
1544 __asm__
__volatile__("ldxa [%%g0] %0, %%g1\n\t"
1545 "or %%g1, %1, %%g1\n\t"
1546 "stxa %%g1, [%%g0] %0\n\t"
1549 : "i" (ASI_DCU_CONTROL_REG
),
1550 "i" (DCU_DC
| DCU_IC
)
1553 /* Re-enable error reporting */
1554 __asm__
__volatile__("ldxa [%%g0] %0, %%g1\n\t"
1555 "or %%g1, %1, %%g1\n\t"
1556 "stxa %%g1, [%%g0] %0\n\t"
1559 : "i" (ASI_ESTATE_ERROR_EN
),
1560 "i" (ESTATE_ERROR_NCEEN
| ESTATE_ERROR_CEEN
)
1563 (void) cheetah_recheck_errors(NULL
);
1565 pci_poke_faulted
= 1;
1567 regs
->tnpc
= regs
->tpc
+ 4;
1572 p
= cheetah_get_error_log(afsr
);
1574 prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
1576 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1577 smp_processor_id(), regs
->tpc
, regs
->tnpc
, regs
->tstate
);
1581 /* Grab snapshot of logged error. */
1582 memcpy(&local_snapshot
, p
, sizeof(local_snapshot
));
1584 /* If the current trap snapshot does not match what the
1585 * trap handler passed along into our args, big trouble.
1586 * In such a case, mark the local copy as invalid.
1588 * Else, it matches and we mark the afsr in the non-local
1589 * copy as invalid so we may log new error traps there.
1591 if (p
->afsr
!= afsr
|| p
->afar
!= afar
)
1592 local_snapshot
.afsr
= CHAFSR_INVALID
;
1594 p
->afsr
= CHAFSR_INVALID
;
1596 is_memory
= cheetah_check_main_memory(afar
);
1599 int flush_all
, flush_line
;
1601 flush_all
= flush_line
= 0;
1602 if ((afsr
& CHAFSR_EDU
) != 0UL) {
1603 if ((afsr
& cheetah_afsr_errors
) == CHAFSR_EDU
)
1607 } else if ((afsr
& CHAFSR_BERR
) != 0UL) {
1608 if ((afsr
& cheetah_afsr_errors
) == CHAFSR_BERR
)
1614 cheetah_flush_icache();
1615 cheetah_flush_dcache();
1617 /* Re-enable I/D caches */
1618 __asm__
__volatile__("ldxa [%%g0] %0, %%g1\n\t"
1619 "or %%g1, %1, %%g1\n\t"
1620 "stxa %%g1, [%%g0] %0\n\t"
1623 : "i" (ASI_DCU_CONTROL_REG
),
1624 "i" (DCU_IC
| DCU_DC
)
1628 cheetah_flush_ecache();
1629 else if (flush_line
)
1630 cheetah_flush_ecache_line(afar
);
1633 /* Re-enable error reporting */
1634 __asm__
__volatile__("ldxa [%%g0] %0, %%g1\n\t"
1635 "or %%g1, %1, %%g1\n\t"
1636 "stxa %%g1, [%%g0] %0\n\t"
1639 : "i" (ASI_ESTATE_ERROR_EN
),
1640 "i" (ESTATE_ERROR_NCEEN
| ESTATE_ERROR_CEEN
)
1643 /* Decide if we can continue after handling this trap and
1644 * logging the error.
1647 if (afsr
& (CHAFSR_PERR
| CHAFSR_IERR
| CHAFSR_ISAP
))
1650 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1651 * error was logged while we had error reporting traps disabled.
1653 if (cheetah_recheck_errors(&local_snapshot
)) {
1654 unsigned long new_afsr
= local_snapshot
.afsr
;
1656 /* If we got a new asynchronous error, die... */
1657 if (new_afsr
& (CHAFSR_EMU
| CHAFSR_EDU
|
1658 CHAFSR_WDU
| CHAFSR_CPU
|
1659 CHAFSR_IVU
| CHAFSR_UE
|
1660 CHAFSR_BERR
| CHAFSR_TO
))
1665 cheetah_log_errors(regs
, &local_snapshot
, afsr
, afar
, recoverable
);
1667 /* "Recoverable" here means we try to yank the page from ever
1668 * being newly used again. This depends upon a few things:
1669 * 1) Must be main memory, and AFAR must be valid.
1670 * 2) If we trapped from user, OK.
1671 * 3) Else, if we trapped from kernel we must find exception
1672 * table entry (ie. we have to have been accessing user
1675 * If AFAR is not in main memory, or we trapped from kernel
1676 * and cannot find an exception table entry, it is unacceptable
1677 * to try and continue.
1679 if (recoverable
&& is_memory
) {
1680 if ((regs
->tstate
& TSTATE_PRIV
) == 0UL) {
1681 /* OK, usermode access. */
1684 const struct exception_table_entry
*entry
;
1686 entry
= search_exception_tables(regs
->tpc
);
1688 /* OK, kernel access to userspace. */
1692 /* BAD, privileged state is corrupted. */
1697 if (pfn_valid(afar
>> PAGE_SHIFT
))
1698 get_page(pfn_to_page(afar
>> PAGE_SHIFT
));
1702 /* Only perform fixup if we still have a
1703 * recoverable condition.
1706 regs
->tpc
= entry
->fixup
;
1707 regs
->tnpc
= regs
->tpc
+ 4;
1716 panic("Irrecoverable deferred error trap.\n");
1719 /* Handle a D/I cache parity error trap. TYPE is encoded as:
1721 * Bit0: 0=dcache,1=icache
1722 * Bit1: 0=recoverable,1=unrecoverable
1724 * The hardware has disabled both the I-cache and D-cache in
1725 * the %dcr register.
1727 void cheetah_plus_parity_error(int type
, struct pt_regs
*regs
)
1730 __cheetah_flush_icache();
1732 cheetah_plus_zap_dcache_parity();
1733 cheetah_flush_dcache();
1735 /* Re-enable I-cache/D-cache */
1736 __asm__
__volatile__("ldxa [%%g0] %0, %%g1\n\t"
1737 "or %%g1, %1, %%g1\n\t"
1738 "stxa %%g1, [%%g0] %0\n\t"
1741 : "i" (ASI_DCU_CONTROL_REG
),
1742 "i" (DCU_DC
| DCU_IC
)
1746 printk(KERN_EMERG
"CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1748 (type
& 0x1) ? 'I' : 'D',
1750 printk(KERN_EMERG
"TPC<%pS>\n", (void *) regs
->tpc
);
1751 panic("Irrecoverable Cheetah+ parity error.");
1754 printk(KERN_WARNING
"CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1756 (type
& 0x1) ? 'I' : 'D',
1758 printk(KERN_WARNING
"TPC<%pS>\n", (void *) regs
->tpc
);
1761 struct sun4v_error_entry
{
1766 #define SUN4V_ERR_TYPE_UNDEFINED 0
1767 #define SUN4V_ERR_TYPE_UNCORRECTED_RES 1
1768 #define SUN4V_ERR_TYPE_PRECISE_NONRES 2
1769 #define SUN4V_ERR_TYPE_DEFERRED_NONRES 3
1770 #define SUN4V_ERR_TYPE_WARNING_RES 4
1773 #define SUN4V_ERR_ATTRS_PROCESSOR 0x00000001
1774 #define SUN4V_ERR_ATTRS_MEMORY 0x00000002
1775 #define SUN4V_ERR_ATTRS_PIO 0x00000004
1776 #define SUN4V_ERR_ATTRS_INT_REGISTERS 0x00000008
1777 #define SUN4V_ERR_ATTRS_FPU_REGISTERS 0x00000010
1778 #define SUN4V_ERR_ATTRS_USER_MODE 0x01000000
1779 #define SUN4V_ERR_ATTRS_PRIV_MODE 0x02000000
1780 #define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000
1788 static atomic_t sun4v_resum_oflow_cnt
= ATOMIC_INIT(0);
1789 static atomic_t sun4v_nonresum_oflow_cnt
= ATOMIC_INIT(0);
1791 static const char *sun4v_err_type_to_str(u32 type
)
1794 case SUN4V_ERR_TYPE_UNDEFINED
:
1796 case SUN4V_ERR_TYPE_UNCORRECTED_RES
:
1797 return "uncorrected resumable";
1798 case SUN4V_ERR_TYPE_PRECISE_NONRES
:
1799 return "precise nonresumable";
1800 case SUN4V_ERR_TYPE_DEFERRED_NONRES
:
1801 return "deferred nonresumable";
1802 case SUN4V_ERR_TYPE_WARNING_RES
:
1803 return "warning resumable";
1809 static void sun4v_log_error(struct pt_regs
*regs
, struct sun4v_error_entry
*ent
, int cpu
, const char *pfx
, atomic_t
*ocnt
)
1813 printk("%s: Reporting on cpu %d\n", pfx
, cpu
);
1814 printk("%s: err_handle[%llx] err_stick[%llx] err_type[%08x:%s]\n",
1816 ent
->err_handle
, ent
->err_stick
,
1818 sun4v_err_type_to_str(ent
->err_type
));
1819 printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
1822 ((ent
->err_attrs
& SUN4V_ERR_ATTRS_PROCESSOR
) ?
1824 ((ent
->err_attrs
& SUN4V_ERR_ATTRS_MEMORY
) ?
1826 ((ent
->err_attrs
& SUN4V_ERR_ATTRS_PIO
) ?
1828 ((ent
->err_attrs
& SUN4V_ERR_ATTRS_INT_REGISTERS
) ?
1829 "integer-regs" : ""),
1830 ((ent
->err_attrs
& SUN4V_ERR_ATTRS_FPU_REGISTERS
) ?
1832 ((ent
->err_attrs
& SUN4V_ERR_ATTRS_USER_MODE
) ?
1834 ((ent
->err_attrs
& SUN4V_ERR_ATTRS_PRIV_MODE
) ?
1836 ((ent
->err_attrs
& SUN4V_ERR_ATTRS_RES_QUEUE_FULL
) ?
1837 "queue-full" : ""));
1838 printk("%s: err_raddr[%016llx] err_size[%u] err_cpu[%u]\n",
1840 ent
->err_raddr
, ent
->err_size
, ent
->err_cpu
);
1844 if ((cnt
= atomic_read(ocnt
)) != 0) {
1845 atomic_set(ocnt
, 0);
1847 printk("%s: Queue overflowed %d times.\n",
1852 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
1853 * Log the event and clear the first word of the entry.
1855 void sun4v_resum_error(struct pt_regs
*regs
, unsigned long offset
)
1857 struct sun4v_error_entry
*ent
, local_copy
;
1858 struct trap_per_cpu
*tb
;
1859 unsigned long paddr
;
1864 tb
= &trap_block
[cpu
];
1865 paddr
= tb
->resum_kernel_buf_pa
+ offset
;
1868 memcpy(&local_copy
, ent
, sizeof(struct sun4v_error_entry
));
1870 /* We have a local copy now, so release the entry. */
1871 ent
->err_handle
= 0;
1876 if (ent
->err_type
== SUN4V_ERR_TYPE_WARNING_RES
) {
1877 /* If err_type is 0x4, it's a powerdown request. Do
1878 * not do the usual resumable error log because that
1879 * makes it look like some abnormal error.
1881 printk(KERN_INFO
"Power down request...\n");
1882 kill_cad_pid(SIGINT
, 1);
1886 sun4v_log_error(regs
, &local_copy
, cpu
,
1887 KERN_ERR
"RESUMABLE ERROR",
1888 &sun4v_resum_oflow_cnt
);
1891 /* If we try to printk() we'll probably make matters worse, by trying
1892 * to retake locks this cpu already holds or causing more errors. So
1893 * just bump a counter, and we'll report these counter bumps above.
1895 void sun4v_resum_overflow(struct pt_regs
*regs
)
1897 atomic_inc(&sun4v_resum_oflow_cnt
);
1900 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
1901 * Log the event, clear the first word of the entry, and die.
1903 void sun4v_nonresum_error(struct pt_regs
*regs
, unsigned long offset
)
1905 struct sun4v_error_entry
*ent
, local_copy
;
1906 struct trap_per_cpu
*tb
;
1907 unsigned long paddr
;
1912 tb
= &trap_block
[cpu
];
1913 paddr
= tb
->nonresum_kernel_buf_pa
+ offset
;
1916 memcpy(&local_copy
, ent
, sizeof(struct sun4v_error_entry
));
1918 /* We have a local copy now, so release the entry. */
1919 ent
->err_handle
= 0;
1925 /* Check for the special PCI poke sequence. */
1926 if (pci_poke_in_progress
&& pci_poke_cpu
== cpu
) {
1927 pci_poke_faulted
= 1;
1929 regs
->tnpc
= regs
->tpc
+ 4;
1934 sun4v_log_error(regs
, &local_copy
, cpu
,
1935 KERN_EMERG
"NON-RESUMABLE ERROR",
1936 &sun4v_nonresum_oflow_cnt
);
1938 panic("Non-resumable error.");
1941 /* If we try to printk() we'll probably make matters worse, by trying
1942 * to retake locks this cpu already holds or causing more errors. So
1943 * just bump a counter, and we'll report these counter bumps above.
1945 void sun4v_nonresum_overflow(struct pt_regs
*regs
)
1947 /* XXX Actually even this can make not that much sense. Perhaps
1948 * XXX we should just pull the plug and panic directly from here?
1950 atomic_inc(&sun4v_nonresum_oflow_cnt
);
1953 unsigned long sun4v_err_itlb_vaddr
;
1954 unsigned long sun4v_err_itlb_ctx
;
1955 unsigned long sun4v_err_itlb_pte
;
1956 unsigned long sun4v_err_itlb_error
;
1958 void sun4v_itlb_error_report(struct pt_regs
*regs
, int tl
)
1961 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
1963 printk(KERN_EMERG
"SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
1965 printk(KERN_EMERG
"SUN4V-ITLB: TPC<%pS>\n", (void *) regs
->tpc
);
1966 printk(KERN_EMERG
"SUN4V-ITLB: O7[%lx]\n", regs
->u_regs
[UREG_I7
]);
1967 printk(KERN_EMERG
"SUN4V-ITLB: O7<%pS>\n",
1968 (void *) regs
->u_regs
[UREG_I7
]);
1969 printk(KERN_EMERG
"SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
1970 "pte[%lx] error[%lx]\n",
1971 sun4v_err_itlb_vaddr
, sun4v_err_itlb_ctx
,
1972 sun4v_err_itlb_pte
, sun4v_err_itlb_error
);
1977 unsigned long sun4v_err_dtlb_vaddr
;
1978 unsigned long sun4v_err_dtlb_ctx
;
1979 unsigned long sun4v_err_dtlb_pte
;
1980 unsigned long sun4v_err_dtlb_error
;
1982 void sun4v_dtlb_error_report(struct pt_regs
*regs
, int tl
)
1985 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
1987 printk(KERN_EMERG
"SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
1989 printk(KERN_EMERG
"SUN4V-DTLB: TPC<%pS>\n", (void *) regs
->tpc
);
1990 printk(KERN_EMERG
"SUN4V-DTLB: O7[%lx]\n", regs
->u_regs
[UREG_I7
]);
1991 printk(KERN_EMERG
"SUN4V-DTLB: O7<%pS>\n",
1992 (void *) regs
->u_regs
[UREG_I7
]);
1993 printk(KERN_EMERG
"SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
1994 "pte[%lx] error[%lx]\n",
1995 sun4v_err_dtlb_vaddr
, sun4v_err_dtlb_ctx
,
1996 sun4v_err_dtlb_pte
, sun4v_err_dtlb_error
);
2001 void hypervisor_tlbop_error(unsigned long err
, unsigned long op
)
2003 printk(KERN_CRIT
"SUN4V: TLB hv call error %lu for op %lu\n",
2007 void hypervisor_tlbop_error_xcall(unsigned long err
, unsigned long op
)
2009 printk(KERN_CRIT
"SUN4V: XCALL TLB hv call error %lu for op %lu\n",
2013 void do_fpe_common(struct pt_regs
*regs
)
2015 if (regs
->tstate
& TSTATE_PRIV
) {
2016 regs
->tpc
= regs
->tnpc
;
2019 unsigned long fsr
= current_thread_info()->xfsr
[0];
2022 if (test_thread_flag(TIF_32BIT
)) {
2023 regs
->tpc
&= 0xffffffff;
2024 regs
->tnpc
&= 0xffffffff;
2026 info
.si_signo
= SIGFPE
;
2028 info
.si_addr
= (void __user
*)regs
->tpc
;
2030 info
.si_code
= __SI_FAULT
;
2031 if ((fsr
& 0x1c000) == (1 << 14)) {
2033 info
.si_code
= FPE_FLTINV
;
2034 else if (fsr
& 0x08)
2035 info
.si_code
= FPE_FLTOVF
;
2036 else if (fsr
& 0x04)
2037 info
.si_code
= FPE_FLTUND
;
2038 else if (fsr
& 0x02)
2039 info
.si_code
= FPE_FLTDIV
;
2040 else if (fsr
& 0x01)
2041 info
.si_code
= FPE_FLTRES
;
2043 force_sig_info(SIGFPE
, &info
, current
);
2047 void do_fpieee(struct pt_regs
*regs
)
2049 if (notify_die(DIE_TRAP
, "fpu exception ieee", regs
,
2050 0, 0x24, SIGFPE
) == NOTIFY_STOP
)
2053 do_fpe_common(regs
);
2056 extern int do_mathemu(struct pt_regs
*, struct fpustate
*);
2058 void do_fpother(struct pt_regs
*regs
)
2060 struct fpustate
*f
= FPUSTATE
;
2063 if (notify_die(DIE_TRAP
, "fpu exception other", regs
,
2064 0, 0x25, SIGFPE
) == NOTIFY_STOP
)
2067 switch ((current_thread_info()->xfsr
[0] & 0x1c000)) {
2068 case (2 << 14): /* unfinished_FPop */
2069 case (3 << 14): /* unimplemented_FPop */
2070 ret
= do_mathemu(regs
, f
);
2075 do_fpe_common(regs
);
2078 void do_tof(struct pt_regs
*regs
)
2082 if (notify_die(DIE_TRAP
, "tagged arithmetic overflow", regs
,
2083 0, 0x26, SIGEMT
) == NOTIFY_STOP
)
2086 if (regs
->tstate
& TSTATE_PRIV
)
2087 die_if_kernel("Penguin overflow trap from kernel mode", regs
);
2088 if (test_thread_flag(TIF_32BIT
)) {
2089 regs
->tpc
&= 0xffffffff;
2090 regs
->tnpc
&= 0xffffffff;
2092 info
.si_signo
= SIGEMT
;
2094 info
.si_code
= EMT_TAGOVF
;
2095 info
.si_addr
= (void __user
*)regs
->tpc
;
2097 force_sig_info(SIGEMT
, &info
, current
);
2100 void do_div0(struct pt_regs
*regs
)
2104 if (notify_die(DIE_TRAP
, "integer division by zero", regs
,
2105 0, 0x28, SIGFPE
) == NOTIFY_STOP
)
2108 if (regs
->tstate
& TSTATE_PRIV
)
2109 die_if_kernel("TL0: Kernel divide by zero.", regs
);
2110 if (test_thread_flag(TIF_32BIT
)) {
2111 regs
->tpc
&= 0xffffffff;
2112 regs
->tnpc
&= 0xffffffff;
2114 info
.si_signo
= SIGFPE
;
2116 info
.si_code
= FPE_INTDIV
;
2117 info
.si_addr
= (void __user
*)regs
->tpc
;
2119 force_sig_info(SIGFPE
, &info
, current
);
2122 static void instruction_dump(unsigned int *pc
)
2126 if ((((unsigned long) pc
) & 3))
2129 printk("Instruction DUMP:");
2130 for (i
= -3; i
< 6; i
++)
2131 printk("%c%08x%c",i
?' ':'<',pc
[i
],i
?' ':'>');
2135 static void user_instruction_dump(unsigned int __user
*pc
)
2138 unsigned int buf
[9];
2140 if ((((unsigned long) pc
) & 3))
2143 if (copy_from_user(buf
, pc
- 3, sizeof(buf
)))
2146 printk("Instruction DUMP:");
2147 for (i
= 0; i
< 9; i
++)
2148 printk("%c%08x%c",i
==3?' ':'<',buf
[i
],i
==3?' ':'>');
2152 void show_stack(struct task_struct
*tsk
, unsigned long *_ksp
)
2154 unsigned long fp
, thread_base
, ksp
;
2155 struct thread_info
*tp
;
2158 ksp
= (unsigned long) _ksp
;
2161 tp
= task_thread_info(tsk
);
2164 asm("mov %%fp, %0" : "=r" (ksp
));
2168 if (tp
== current_thread_info())
2171 fp
= ksp
+ STACK_BIAS
;
2172 thread_base
= (unsigned long) tp
;
2174 printk("Call Trace:\n");
2176 struct sparc_stackf
*sf
;
2177 struct pt_regs
*regs
;
2180 if (!kstack_valid(tp
, fp
))
2182 sf
= (struct sparc_stackf
*) fp
;
2183 regs
= (struct pt_regs
*) (sf
+ 1);
2185 if (kstack_is_trap_frame(tp
, regs
)) {
2186 if (!(regs
->tstate
& TSTATE_PRIV
))
2189 fp
= regs
->u_regs
[UREG_I6
] + STACK_BIAS
;
2191 pc
= sf
->callers_pc
;
2192 fp
= (unsigned long)sf
->fp
+ STACK_BIAS
;
2195 printk(" [%016lx] %pS\n", pc
, (void *) pc
);
2196 } while (++count
< 16);
2199 void dump_stack(void)
2201 show_stack(current
, NULL
);
2204 EXPORT_SYMBOL(dump_stack
);
2206 static inline int is_kernel_stack(struct task_struct
*task
,
2207 struct reg_window
*rw
)
2209 unsigned long rw_addr
= (unsigned long) rw
;
2210 unsigned long thread_base
, thread_end
;
2212 if (rw_addr
< PAGE_OFFSET
) {
2213 if (task
!= &init_task
)
2217 thread_base
= (unsigned long) task_stack_page(task
);
2218 thread_end
= thread_base
+ sizeof(union thread_union
);
2219 if (rw_addr
>= thread_base
&&
2220 rw_addr
< thread_end
&&
2227 static inline struct reg_window
*kernel_stack_up(struct reg_window
*rw
)
2229 unsigned long fp
= rw
->ins
[6];
2234 return (struct reg_window
*) (fp
+ STACK_BIAS
);
2237 void die_if_kernel(char *str
, struct pt_regs
*regs
)
2239 static int die_counter
;
2242 /* Amuse the user. */
2245 " \"@'/ .. \\`@\"\n"
2249 printk("%s(%d): %s [#%d]\n", current
->comm
, task_pid_nr(current
), str
, ++die_counter
);
2250 notify_die(DIE_OOPS
, str
, regs
, 0, 255, SIGSEGV
);
2251 __asm__
__volatile__("flushw");
2253 add_taint(TAINT_DIE
);
2254 if (regs
->tstate
& TSTATE_PRIV
) {
2255 struct reg_window
*rw
= (struct reg_window
*)
2256 (regs
->u_regs
[UREG_FP
] + STACK_BIAS
);
2258 /* Stop the back trace when we hit userland or we
2259 * find some badly aligned kernel stack.
2263 is_kernel_stack(current
, rw
)) {
2264 printk("Caller[%016lx]: %pS\n", rw
->ins
[7],
2265 (void *) rw
->ins
[7]);
2267 rw
= kernel_stack_up(rw
);
2269 instruction_dump ((unsigned int *) regs
->tpc
);
2271 if (test_thread_flag(TIF_32BIT
)) {
2272 regs
->tpc
&= 0xffffffff;
2273 regs
->tnpc
&= 0xffffffff;
2275 user_instruction_dump ((unsigned int __user
*) regs
->tpc
);
2277 if (regs
->tstate
& TSTATE_PRIV
)
2281 EXPORT_SYMBOL(die_if_kernel
);
2283 #define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19))
2284 #define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19))
2286 extern int handle_popc(u32 insn
, struct pt_regs
*regs
);
2287 extern int handle_ldf_stq(u32 insn
, struct pt_regs
*regs
);
2289 void do_illegal_instruction(struct pt_regs
*regs
)
2291 unsigned long pc
= regs
->tpc
;
2292 unsigned long tstate
= regs
->tstate
;
2296 if (notify_die(DIE_TRAP
, "illegal instruction", regs
,
2297 0, 0x10, SIGILL
) == NOTIFY_STOP
)
2300 if (tstate
& TSTATE_PRIV
)
2301 die_if_kernel("Kernel illegal instruction", regs
);
2302 if (test_thread_flag(TIF_32BIT
))
2304 if (get_user(insn
, (u32 __user
*) pc
) != -EFAULT
) {
2305 if ((insn
& 0xc1ffc000) == 0x81700000) /* POPC */ {
2306 if (handle_popc(insn
, regs
))
2308 } else if ((insn
& 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
2309 if (handle_ldf_stq(insn
, regs
))
2311 } else if (tlb_type
== hypervisor
) {
2312 if ((insn
& VIS_OPCODE_MASK
) == VIS_OPCODE_VAL
) {
2313 if (!vis_emul(regs
, insn
))
2316 struct fpustate
*f
= FPUSTATE
;
2318 /* XXX maybe verify XFSR bits like
2319 * XXX do_fpother() does?
2321 if (do_mathemu(regs
, f
))
2326 info
.si_signo
= SIGILL
;
2328 info
.si_code
= ILL_ILLOPC
;
2329 info
.si_addr
= (void __user
*)pc
;
2331 force_sig_info(SIGILL
, &info
, current
);
2334 extern void kernel_unaligned_trap(struct pt_regs
*regs
, unsigned int insn
);
2336 void mem_address_unaligned(struct pt_regs
*regs
, unsigned long sfar
, unsigned long sfsr
)
2340 if (notify_die(DIE_TRAP
, "memory address unaligned", regs
,
2341 0, 0x34, SIGSEGV
) == NOTIFY_STOP
)
2344 if (regs
->tstate
& TSTATE_PRIV
) {
2345 kernel_unaligned_trap(regs
, *((unsigned int *)regs
->tpc
));
2348 info
.si_signo
= SIGBUS
;
2350 info
.si_code
= BUS_ADRALN
;
2351 info
.si_addr
= (void __user
*)sfar
;
2353 force_sig_info(SIGBUS
, &info
, current
);
2356 void sun4v_do_mna(struct pt_regs
*regs
, unsigned long addr
, unsigned long type_ctx
)
2360 if (notify_die(DIE_TRAP
, "memory address unaligned", regs
,
2361 0, 0x34, SIGSEGV
) == NOTIFY_STOP
)
2364 if (regs
->tstate
& TSTATE_PRIV
) {
2365 kernel_unaligned_trap(regs
, *((unsigned int *)regs
->tpc
));
2368 info
.si_signo
= SIGBUS
;
2370 info
.si_code
= BUS_ADRALN
;
2371 info
.si_addr
= (void __user
*) addr
;
2373 force_sig_info(SIGBUS
, &info
, current
);
2376 void do_privop(struct pt_regs
*regs
)
2380 if (notify_die(DIE_TRAP
, "privileged operation", regs
,
2381 0, 0x11, SIGILL
) == NOTIFY_STOP
)
2384 if (test_thread_flag(TIF_32BIT
)) {
2385 regs
->tpc
&= 0xffffffff;
2386 regs
->tnpc
&= 0xffffffff;
2388 info
.si_signo
= SIGILL
;
2390 info
.si_code
= ILL_PRVOPC
;
2391 info
.si_addr
= (void __user
*)regs
->tpc
;
2393 force_sig_info(SIGILL
, &info
, current
);
/* Privileged-action trap: handled the same way as a privileged
 * opcode.  NOTE(review): the body of this function was lost in
 * extraction; restored from upstream sparc64 traps.c -- verify.
 */
void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}
2401 /* Trap level 1 stuff or other traps we should never see... */
/* Cache error at trap level 0: no recovery path, just die. */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}
2407 void do_cee_tl1(struct pt_regs
*regs
)
2409 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
2410 die_if_kernel("TL1: Cache Error Exception", regs
);
2413 void do_dae_tl1(struct pt_regs
*regs
)
2415 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
2416 die_if_kernel("TL1: Data Access Exception", regs
);
2419 void do_iae_tl1(struct pt_regs
*regs
)
2421 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
2422 die_if_kernel("TL1: Instruction Access Exception", regs
);
2425 void do_div0_tl1(struct pt_regs
*regs
)
2427 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
2428 die_if_kernel("TL1: DIV0 Exception", regs
);
2431 void do_fpdis_tl1(struct pt_regs
*regs
)
2433 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
2434 die_if_kernel("TL1: FPU Disabled", regs
);
2437 void do_fpieee_tl1(struct pt_regs
*regs
)
2439 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
2440 die_if_kernel("TL1: FPU IEEE Exception", regs
);
2443 void do_fpother_tl1(struct pt_regs
*regs
)
2445 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
2446 die_if_kernel("TL1: FPU Other Exception", regs
);
2449 void do_ill_tl1(struct pt_regs
*regs
)
2451 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
2452 die_if_kernel("TL1: Illegal Instruction Exception", regs
);
2455 void do_irq_tl1(struct pt_regs
*regs
)
2457 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
2458 die_if_kernel("TL1: IRQ Exception", regs
);
2461 void do_lddfmna_tl1(struct pt_regs
*regs
)
2463 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
2464 die_if_kernel("TL1: LDDF Exception", regs
);
2467 void do_stdfmna_tl1(struct pt_regs
*regs
)
2469 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
2470 die_if_kernel("TL1: STDF Exception", regs
);
/* Physical-address watchpoint hit at TL 0: die. */
void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}
2478 void do_paw_tl1(struct pt_regs
*regs
)
2480 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
2481 die_if_kernel("TL1: Phys Watchpoint Exception", regs
);
/* Virtual-address watchpoint hit at TL 0: die. */
void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}
2489 void do_vaw_tl1(struct pt_regs
*regs
)
2491 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
2492 die_if_kernel("TL1: Virt Watchpoint Exception", regs
);
2495 void do_tof_tl1(struct pt_regs
*regs
)
2497 dump_tl1_traplog((struct tl1_traplog
*)(regs
+ 1));
2498 die_if_kernel("TL1: Tag Overflow Exception", regs
);
2501 void do_getpsr(struct pt_regs
*regs
)
2503 regs
->u_regs
[UREG_I0
] = tstate_to_psr(regs
->tstate
);
2504 regs
->tpc
= regs
->tnpc
;
2506 if (test_thread_flag(TIF_32BIT
)) {
2507 regs
->tpc
&= 0xffffffff;
2508 regs
->tnpc
&= 0xffffffff;
2512 struct trap_per_cpu trap_block
[NR_CPUS
];
2513 EXPORT_SYMBOL(trap_block
);
2515 /* This can get invoked before sched_init() so play it super safe
2516 * and use hard_smp_processor_id().
2518 void notrace
init_cur_cpu_trap(struct thread_info
*t
)
2520 int cpu
= hard_smp_processor_id();
2521 struct trap_per_cpu
*p
= &trap_block
[cpu
];
2527 extern void thread_info_offsets_are_bolixed_dave(void);
2528 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
2529 extern void tsb_config_offsets_are_bolixed_dave(void);
2531 /* Only invoked on boot processor. */
2532 void __init
trap_init(void)
2534 /* Compile time sanity check. */
2535 BUILD_BUG_ON(TI_TASK
!= offsetof(struct thread_info
, task
) ||
2536 TI_FLAGS
!= offsetof(struct thread_info
, flags
) ||
2537 TI_CPU
!= offsetof(struct thread_info
, cpu
) ||
2538 TI_FPSAVED
!= offsetof(struct thread_info
, fpsaved
) ||
2539 TI_KSP
!= offsetof(struct thread_info
, ksp
) ||
2540 TI_FAULT_ADDR
!= offsetof(struct thread_info
,
2542 TI_KREGS
!= offsetof(struct thread_info
, kregs
) ||
2543 TI_UTRAPS
!= offsetof(struct thread_info
, utraps
) ||
2544 TI_EXEC_DOMAIN
!= offsetof(struct thread_info
,
2546 TI_REG_WINDOW
!= offsetof(struct thread_info
,
2548 TI_RWIN_SPTRS
!= offsetof(struct thread_info
,
2550 TI_GSR
!= offsetof(struct thread_info
, gsr
) ||
2551 TI_XFSR
!= offsetof(struct thread_info
, xfsr
) ||
2552 TI_PRE_COUNT
!= offsetof(struct thread_info
,
2554 TI_NEW_CHILD
!= offsetof(struct thread_info
, new_child
) ||
2555 TI_SYS_NOERROR
!= offsetof(struct thread_info
,
2557 TI_RESTART_BLOCK
!= offsetof(struct thread_info
,
2559 TI_KUNA_REGS
!= offsetof(struct thread_info
,
2561 TI_KUNA_INSN
!= offsetof(struct thread_info
,
2563 TI_FPREGS
!= offsetof(struct thread_info
, fpregs
) ||
2564 (TI_FPREGS
& (64 - 1)));
2566 BUILD_BUG_ON(TRAP_PER_CPU_THREAD
!= offsetof(struct trap_per_cpu
,
2568 (TRAP_PER_CPU_PGD_PADDR
!=
2569 offsetof(struct trap_per_cpu
, pgd_paddr
)) ||
2570 (TRAP_PER_CPU_CPU_MONDO_PA
!=
2571 offsetof(struct trap_per_cpu
, cpu_mondo_pa
)) ||
2572 (TRAP_PER_CPU_DEV_MONDO_PA
!=
2573 offsetof(struct trap_per_cpu
, dev_mondo_pa
)) ||
2574 (TRAP_PER_CPU_RESUM_MONDO_PA
!=
2575 offsetof(struct trap_per_cpu
, resum_mondo_pa
)) ||
2576 (TRAP_PER_CPU_RESUM_KBUF_PA
!=
2577 offsetof(struct trap_per_cpu
, resum_kernel_buf_pa
)) ||
2578 (TRAP_PER_CPU_NONRESUM_MONDO_PA
!=
2579 offsetof(struct trap_per_cpu
, nonresum_mondo_pa
)) ||
2580 (TRAP_PER_CPU_NONRESUM_KBUF_PA
!=
2581 offsetof(struct trap_per_cpu
, nonresum_kernel_buf_pa
)) ||
2582 (TRAP_PER_CPU_FAULT_INFO
!=
2583 offsetof(struct trap_per_cpu
, fault_info
)) ||
2584 (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA
!=
2585 offsetof(struct trap_per_cpu
, cpu_mondo_block_pa
)) ||
2586 (TRAP_PER_CPU_CPU_LIST_PA
!=
2587 offsetof(struct trap_per_cpu
, cpu_list_pa
)) ||
2588 (TRAP_PER_CPU_TSB_HUGE
!=
2589 offsetof(struct trap_per_cpu
, tsb_huge
)) ||
2590 (TRAP_PER_CPU_TSB_HUGE_TEMP
!=
2591 offsetof(struct trap_per_cpu
, tsb_huge_temp
)) ||
2592 (TRAP_PER_CPU_IRQ_WORKLIST_PA
!=
2593 offsetof(struct trap_per_cpu
, irq_worklist_pa
)) ||
2594 (TRAP_PER_CPU_CPU_MONDO_QMASK
!=
2595 offsetof(struct trap_per_cpu
, cpu_mondo_qmask
)) ||
2596 (TRAP_PER_CPU_DEV_MONDO_QMASK
!=
2597 offsetof(struct trap_per_cpu
, dev_mondo_qmask
)) ||
2598 (TRAP_PER_CPU_RESUM_QMASK
!=
2599 offsetof(struct trap_per_cpu
, resum_qmask
)) ||
2600 (TRAP_PER_CPU_NONRESUM_QMASK
!=
2601 offsetof(struct trap_per_cpu
, nonresum_qmask
)) ||
2602 (TRAP_PER_CPU_PER_CPU_BASE
!=
2603 offsetof(struct trap_per_cpu
, __per_cpu_base
)));
2605 BUILD_BUG_ON((TSB_CONFIG_TSB
!=
2606 offsetof(struct tsb_config
, tsb
)) ||
2607 (TSB_CONFIG_RSS_LIMIT
!=
2608 offsetof(struct tsb_config
, tsb_rss_limit
)) ||
2609 (TSB_CONFIG_NENTRIES
!=
2610 offsetof(struct tsb_config
, tsb_nentries
)) ||
2611 (TSB_CONFIG_REG_VAL
!=
2612 offsetof(struct tsb_config
, tsb_reg_val
)) ||
2613 (TSB_CONFIG_MAP_VADDR
!=
2614 offsetof(struct tsb_config
, tsb_map_vaddr
)) ||
2615 (TSB_CONFIG_MAP_PTE
!=
2616 offsetof(struct tsb_config
, tsb_map_pte
)));
2618 /* Attach to the address space of init_task. On SMP we
2619 * do this in smp.c:smp_callin for other cpus.
2621 atomic_inc(&init_mm
.mm_count
);
2622 current
->active_mm
= &init_mm
;