/*
 * linux/irq.h — IRQ line status flags and hardware interrupt
 * controller descriptors.
 * (git-blame table residue removed from this export)
 */
1 | #ifndef _LINUX_IRQ_H |
2 | #define _LINUX_IRQ_H | |
1da177e4 LT |
3 | |
4 | /* | |
5 | * Please do not include this file in generic code. There is currently | |
6 | * no requirement for any architecture to implement anything held | |
7 | * within this file. | |
8 | * | |
9 | * Thanks. --rmk | |
10 | */ | |
11 | ||
23f9b317 | 12 | #include <linux/smp.h> |
1da177e4 | 13 | |
06fcb0c6 | 14 | #ifndef CONFIG_S390 |
1da177e4 LT |
15 | |
16 | #include <linux/linkage.h> | |
17 | #include <linux/cache.h> | |
18 | #include <linux/spinlock.h> | |
19 | #include <linux/cpumask.h> | |
908dcecd | 20 | #include <linux/irqreturn.h> |
1da177e4 LT |
21 | |
22 | #include <asm/irq.h> | |
23 | #include <asm/ptrace.h> | |
24 | ||
25 | /* | |
26 | * IRQ line status. | |
27 | */ | |
28 | #define IRQ_INPROGRESS 1 /* IRQ handler active - do not enter! */ | |
29 | #define IRQ_DISABLED 2 /* IRQ disabled - do not enter! */ | |
30 | #define IRQ_PENDING 4 /* IRQ pending - replay on enable */ | |
31 | #define IRQ_REPLAY 8 /* IRQ has been replayed but not acked yet */ | |
32 | #define IRQ_AUTODETECT 16 /* IRQ is being autodetected */ | |
33 | #define IRQ_WAITING 32 /* IRQ not yet seen - for autodetection */ | |
34 | #define IRQ_LEVEL 64 /* IRQ level triggered */ | |
35 | #define IRQ_MASKED 128 /* IRQ masked - shouldn't be seen again */ | |
06fcb0c6 | 36 | #ifdef ARCH_HAS_IRQ_PER_CPU |
f26fdd59 KW |
37 | # define IRQ_PER_CPU 256 /* IRQ is per CPU */ |
38 | # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) | |
39 | #else | |
40 | # define CHECK_IRQ_PER_CPU(var) 0 | |
41 | #endif | |
1da177e4 LT |
42 | |
43 | /* | |
44 | * Interrupt controller descriptor. This is all we need | |
45 | * to describe about the low-level hardware. | |
46 | */ | |
47 | struct hw_interrupt_type { | |
71d218b7 IM |
48 | const char *typename; |
49 | unsigned int (*startup)(unsigned int irq); | |
50 | void (*shutdown)(unsigned int irq); | |
51 | void (*enable)(unsigned int irq); | |
52 | void (*disable)(unsigned int irq); | |
53 | void (*ack)(unsigned int irq); | |
54 | void (*end)(unsigned int irq); | |
55 | void (*set_affinity)(unsigned int irq, cpumask_t dest); | |
b77d6adc PBG |
56 | /* Currently used only by UML, might disappear one day.*/ |
57 | #ifdef CONFIG_IRQ_RELEASE_METHOD | |
71d218b7 | 58 | void (*release)(unsigned int irq, void *dev_id); |
b77d6adc | 59 | #endif |
1da177e4 LT |
60 | }; |
61 | ||
62 | typedef struct hw_interrupt_type hw_irq_controller; | |
63 | ||
64 | /* | |
65 | * This is the "IRQ descriptor", which contains various information | |
66 | * about the irq, including what kind of hardware handling it has, | |
67 | * whether it is disabled etc etc. | |
68 | * | |
69 | * Pad this out to 32 bytes for cache and indexing reasons. | |
70 | */ | |
34ffdb72 | 71 | struct irq_desc { |
71d218b7 IM |
72 | hw_irq_controller *chip; |
73 | void *chip_data; | |
74 | struct irqaction *action; /* IRQ action list */ | |
75 | unsigned int status; /* IRQ status */ | |
76 | unsigned int depth; /* nested irq disables */ | |
77 | unsigned int irq_count; /* For detecting broken IRQs */ | |
78 | unsigned int irqs_unhandled; | |
79 | spinlock_t lock; | |
a53da52f | 80 | #ifdef CONFIG_SMP |
71d218b7 | 81 | cpumask_t affinity; |
a53da52f | 82 | #endif |
06fcb0c6 | 83 | #if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE) |
71d218b7 | 84 | unsigned int move_irq; /* need to re-target IRQ dest */ |
54d5d424 | 85 | #endif |
34ffdb72 | 86 | } ____cacheline_aligned; |
1da177e4 | 87 | |
34ffdb72 | 88 | extern struct irq_desc irq_desc[NR_IRQS]; |
1da177e4 | 89 | |
34ffdb72 IM |
90 | /* |
91 | * Migration helpers for obsolete names, they will go away: | |
92 | */ | |
93 | typedef struct irq_desc irq_desc_t; | |
94 | ||
95 | /* | |
96 | * Pick up the arch-dependent methods: | |
97 | */ | |
98 | #include <asm/hw_irq.h> | |
1da177e4 | 99 | |
06fcb0c6 | 100 | extern int setup_irq(unsigned int irq, struct irqaction *new); |
1da177e4 LT |
101 | |
102 | #ifdef CONFIG_GENERIC_HARDIRQS | |
06fcb0c6 | 103 | |
54d5d424 AR |
104 | #ifdef CONFIG_SMP |
105 | static inline void set_native_irq_info(int irq, cpumask_t mask) | |
106 | { | |
a53da52f | 107 | irq_desc[irq].affinity = mask; |
54d5d424 AR |
108 | } |
109 | #else | |
110 | static inline void set_native_irq_info(int irq, cpumask_t mask) | |
111 | { | |
112 | } | |
113 | #endif | |
114 | ||
115 | #ifdef CONFIG_SMP | |
116 | ||
06fcb0c6 | 117 | #if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE) |
54d5d424 AR |
118 | extern cpumask_t pending_irq_cpumask[NR_IRQS]; |
119 | ||
c777ac55 AM |
120 | void set_pending_irq(unsigned int irq, cpumask_t mask); |
121 | void move_native_irq(int irq); | |
54d5d424 AR |
122 | |
123 | #ifdef CONFIG_PCI_MSI | |
124 | /* | |
125 | * Wonder why these are dummies? | |
126 | * For e.g the set_ioapic_affinity_vector() calls the set_ioapic_affinity_irq() | |
127 | * counter part after translating the vector to irq info. We need to perform | |
128 | * this operation on the real irq, when we dont use vector, i.e when | |
129 | * pci_use_vector() is false. | |
130 | */ | |
131 | static inline void move_irq(int irq) | |
132 | { | |
133 | } | |
134 | ||
135 | static inline void set_irq_info(int irq, cpumask_t mask) | |
136 | { | |
137 | } | |
138 | ||
06fcb0c6 | 139 | #else /* CONFIG_PCI_MSI */ |
54d5d424 AR |
140 | |
141 | static inline void move_irq(int irq) | |
142 | { | |
143 | move_native_irq(irq); | |
144 | } | |
145 | ||
146 | static inline void set_irq_info(int irq, cpumask_t mask) | |
147 | { | |
148 | set_native_irq_info(irq, mask); | |
149 | } | |
54d5d424 | 150 | |
06fcb0c6 IM |
151 | #endif /* CONFIG_PCI_MSI */ |
152 | ||
153 | #else /* CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE */ | |
154 | ||
155 | static inline void move_irq(int irq) | |
156 | { | |
157 | } | |
158 | ||
159 | static inline void move_native_irq(int irq) | |
160 | { | |
161 | } | |
162 | ||
163 | static inline void set_pending_irq(unsigned int irq, cpumask_t mask) | |
164 | { | |
165 | } | |
54d5d424 | 166 | |
54d5d424 AR |
167 | static inline void set_irq_info(int irq, cpumask_t mask) |
168 | { | |
169 | set_native_irq_info(irq, mask); | |
170 | } | |
171 | ||
06fcb0c6 | 172 | #endif /* CONFIG_GENERIC_PENDING_IRQ */ |
54d5d424 | 173 | |
06fcb0c6 | 174 | #else /* CONFIG_SMP */ |
54d5d424 AR |
175 | |
176 | #define move_irq(x) | |
177 | #define move_native_irq(x) | |
178 | ||
06fcb0c6 | 179 | #endif /* CONFIG_SMP */ |
54d5d424 | 180 | |
1b61b910 ZY |
181 | #ifdef CONFIG_IRQBALANCE |
182 | extern void set_balance_irq_affinity(unsigned int irq, cpumask_t mask); | |
183 | #else | |
184 | static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask) | |
185 | { | |
186 | } | |
187 | #endif | |
188 | ||
71d218b7 IM |
189 | #ifdef CONFIG_AUTO_IRQ_AFFINITY |
190 | extern int select_smp_affinity(unsigned int irq); | |
191 | #else | |
192 | static inline int select_smp_affinity(unsigned int irq) | |
193 | { | |
194 | return 1; | |
195 | } | |
196 | #endif | |
197 | ||
1da177e4 LT |
198 | extern int no_irq_affinity; |
199 | extern int noirqdebug_setup(char *str); | |
200 | ||
2e60bbb6 IM |
201 | extern irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs, |
202 | struct irqaction *action); | |
203 | /* | |
204 | * Explicit fastcall, because i386 4KSTACKS calls it from assembly: | |
205 | */ | |
1da177e4 | 206 | extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs); |
2e60bbb6 | 207 | |
34ffdb72 | 208 | extern void note_interrupt(unsigned int irq, struct irq_desc *desc, |
2e60bbb6 | 209 | int action_ret, struct pt_regs *regs); |
1da177e4 LT |
210 | extern int can_request_irq(unsigned int irq, unsigned long irqflags); |
211 | ||
212 | extern void init_irq_proc(void); | |
eee45269 | 213 | |
06fcb0c6 | 214 | #endif /* CONFIG_GENERIC_HARDIRQS */ |
1da177e4 LT |
215 | |
216 | extern hw_irq_controller no_irq_type; /* needed in every arch ? */ | |
217 | ||
06fcb0c6 | 218 | #endif /* !CONFIG_S390 */ |
1da177e4 | 219 | |
06fcb0c6 | 220 | #endif /* _LINUX_IRQ_H */ |