/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>
void *irq_stack[NR_CPUS];

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
34 | void ack_bad_irq(unsigned int irq) | |
35 | { | |
36 | printk("unexpected IRQ # %d\n", irq); | |
37 | } | |

atomic_t irq_err_count;

41 | int arch_show_interrupts(struct seq_file *p, int prec) | |
42 | { | |
43 | seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); | |
44 | return 0; | |
45 | } | |
47 | asmlinkage void spurious_interrupt(void) | |
48 | { | |
49 | atomic_inc(&irq_err_count); | |
50 | } | |
52 | void __init init_IRQ(void) | |
53 | { | |
54 | int i; | |
55 | ||
56 | for (i = 0; i < NR_IRQS; i++) | |
57 | irq_set_noprobe(i); | |
58 | ||
59 | if (cpu_has_veic) | |
60 | clear_c0_status(ST0_IM); | |
61 | ||
62 | arch_init_irq(); | |
63 | ||
64 | for_each_possible_cpu(i) { | |
65 | int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE; | |
66 | void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages); | |
67 | ||
68 | irq_stack[i] = s; | |
69 | pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i, | |
70 | irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE); | |
71 | } | |
72 | } | |
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/*
 * Warn when the stack an interrupt arrived on is nearly exhausted.
 * Only the offset of $sp within the current stack matters, hence the
 * THREAD_MASK masking.
 */
static inline void check_stack_overflow(void)
{
	unsigned long sp;

	__asm__ __volatile__("move %0, $sp" : "=r" (sp));
	sp &= THREAD_MASK;

	/*
	 * Check for stack overflow: is there less than STACK_WARN free?
	 * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
	 */
	if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
		/* Explicit log level (checkpatch: printk without KERN_). */
		printk(KERN_WARNING "do_IRQ: stack overflow: %ld\n",
		       sp - sizeof(struct thread_info));
		dump_stack();
	}
}
#else
static inline void check_stack_overflow(void) {}
#endif


/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * The call order is the contract: irq_enter() must precede the handler
 * and irq_exit() must follow it; do not reorder.
 */
void __irq_entry do_IRQ(unsigned int irq)
{
	irq_enter();
	/* No-op unless CONFIG_DEBUG_STACKOVERFLOW is enabled (see above). */
	check_stack_overflow();
	generic_handle_irq(irq);
	irq_exit();
}