]>
Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
1da177e4 LT |
2 | * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar |
3 | * | |
4 | * This file contains the lowest level x86_64-specific interrupt | |
5 | * entry and irq statistics code. All the remaining irq logic is | |
6 | * done by the generic kernel/irq/ code and in the | |
7 | * x86_64-specific irq controller code. (e.g. i8259.c and | |
8 | * io_apic.c.) | |
9 | */ | |
10 | ||
11 | #include <linux/kernel_stat.h> | |
12 | #include <linux/interrupt.h> | |
13 | #include <linux/seq_file.h> | |
14 | #include <linux/module.h> | |
76e4f660 | 15 | #include <linux/delay.h> |
bcbc4f20 | 16 | #include <linux/ftrace.h> |
5f66b2a0 JSR |
17 | #include <linux/uaccess.h> |
18 | #include <linux/smp.h> | |
1da177e4 | 19 | #include <asm/io_apic.h> |
95833c83 | 20 | #include <asm/idle.h> |
3819cd48 | 21 | #include <asm/apic.h> |
1da177e4 | 22 | |
/* Per-CPU interrupt statistics, exported for modules. */
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

/* Per-CPU pointer to the pt_regs of the interrupt currently in flight. */
DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

/*
 * Sysctl knob: when non-zero, panic instead of merely warning once a
 * stack overflow is detected by stack_overflow_check() below.
 */
int sysctl_panic_on_stackoverflow;
/*
 * Probabilistic stack overflow check:
 *
 * Only check the stack in process context, because everything else
 * runs on the big interrupt stacks. Checking reliably is too expensive,
 * so we just check from interrupts.
 */
static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	struct orig_ist *oist;
	u64 irq_stack_top, irq_stack_bottom;
	u64 estack_top, estack_bottom;
	/* Base address of the current task's process stack. */
	u64 curbase = (u64)task_stack_page(current);

	/* Interrupts from user mode cannot overflow a kernel stack. */
	if (user_mode_vm(regs))
		return;

	/*
	 * OK: sp is inside the process stack, with headroom for
	 * thread_info, a pt_regs frame and a 128-byte slack margin
	 * above the bottom of the stack.
	 */
	if (regs->sp >= curbase + sizeof(struct thread_info) +
			sizeof(struct pt_regs) + 128 &&
	    regs->sp <= curbase + THREAD_SIZE)
		return;

	/* OK: sp is inside this CPU's interrupt stack. */
	irq_stack_top = (u64)__get_cpu_var(irq_stack_union.irq_stack);
	irq_stack_bottom = (u64)__get_cpu_var(irq_stack_ptr);
	if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom)
		return;

	/*
	 * OK: sp is inside the per-CPU exception (IST) stack range,
	 * spanning from below the first IST entry to the last one.
	 * NOTE(review): this treats the IST stacks as one contiguous
	 * region — presumably they are laid out back to back; confirm
	 * against the per-CPU setup code.
	 */
	oist = &__get_cpu_var(orig_ist);
	estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ;
	estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
	if (regs->sp >= estack_top && regs->sp <= estack_bottom)
		return;

	/* sp is outside every known stack: report (once) where it ended up. */
	WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx)\n",
		current->comm, curbase, regs->sp,
		irq_stack_top, irq_stack_bottom,
		estack_top, estack_bottom);

	/* Optionally escalate to a panic (see sysctl above). */
	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
#endif
}
4961f10e | 74 | |
9b2b76a3 JF |
75 | bool handle_irq(unsigned irq, struct pt_regs *regs) |
76 | { | |
77 | struct irq_desc *desc; | |
78 | ||
79 | stack_overflow_check(regs); | |
80 | ||
81 | desc = irq_to_desc(irq); | |
82 | if (unlikely(!desc)) | |
83 | return false; | |
84 | ||
85 | generic_handle_irq_desc(irq, desc); | |
86 | return true; | |
87 | } | |
88 | ||
ed6b676c AK |
89 | |
90 | extern void call_softirq(void); | |
91 | ||
92 | asmlinkage void do_softirq(void) | |
93 | { | |
5f66b2a0 JSR |
94 | __u32 pending; |
95 | unsigned long flags; | |
ed6b676c | 96 | |
5f66b2a0 JSR |
97 | if (in_interrupt()) |
98 | return; | |
ed6b676c | 99 | |
5f66b2a0 JSR |
100 | local_irq_save(flags); |
101 | pending = local_softirq_pending(); | |
102 | /* Switch to interrupt stack */ | |
103 | if (pending) { | |
ed6b676c | 104 | call_softirq(); |
2601e64d IM |
105 | WARN_ON_ONCE(softirq_count()); |
106 | } | |
5f66b2a0 | 107 | local_irq_restore(flags); |
ed6b676c | 108 | } |