/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/irqflags.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400

/*
 * Migration helpers. Scheduled for removal in 1/2007.
 * Do not use for new code!
 */
#define SA_INTERRUPT		IRQF_DISABLED
#define SA_SAMPLE_RANDOM	IRQF_SAMPLE_RANDOM
#define SA_SHIRQ		IRQF_SHARED
#define SA_PROBEIRQ		IRQF_PROBE_SHARED
#define SA_PERCPU		IRQF_PERCPU

#define SA_TRIGGER_LOW		IRQF_TRIGGER_LOW
#define SA_TRIGGER_HIGH		IRQF_TRIGGER_HIGH
#define SA_TRIGGER_FALLING	IRQF_TRIGGER_FALLING
#define SA_TRIGGER_RISING	IRQF_TRIGGER_RISING
#define SA_TRIGGER_MASK		IRQF_TRIGGER_MASK

typedef irqreturn_t (*irq_handler_t)(int, void *);

struct irqaction {
	irq_handler_t handler;
	unsigned long flags;
	cpumask_t mask;
	const char *name;
	void *dev_id;
	struct irqaction *next;
	int irq;
	struct proc_dir_entry *dir;
};

extern irqreturn_t no_action(int cpl, void *dev_id);
extern int request_irq(unsigned int, irq_handler_t handler,
		       unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif
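
/*
 * Example (a sketch, hypothetical names): an IRQF_DISABLED handler for
 * insanely slow hardware that must busy-wait on its device re-enables
 * interrupts via the annotation rather than local_irq_enable():
 *
 *	static irqreturn_t slow_handler(int irq, void *dev_id)
 *	{
 *		local_irq_enable_in_hardirq();
 *		// poll the slow device with irqs enabled
 *		return IRQ_HANDLED;
 *	}
 */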

#ifdef CONFIG_GENERIC_HARDIRQS
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used by code that knows a particular
 * irq context is disabled and is the only irq-context
 * user of a lock, so that it is safe to take the lock in
 * the irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}
126 | ||
e8106b94 AV |
127 | static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags) |
128 | { | |
129 | disable_irq_nosync(irq); | |
130 | #ifdef CONFIG_LOCKDEP | |
131 | local_irq_save(*flags); | |
132 | #endif | |
133 | } | |
134 | ||
c01d403b IM |
135 | static inline void disable_irq_lockdep(unsigned int irq) |
136 | { | |
137 | disable_irq(irq); | |
138 | #ifdef CONFIG_LOCKDEP | |
139 | local_irq_disable(); | |
140 | #endif | |
141 | } | |
142 | ||
143 | static inline void enable_irq_lockdep(unsigned int irq) | |
144 | { | |
145 | #ifdef CONFIG_LOCKDEP | |
146 | local_irq_enable(); | |
147 | #endif | |
148 | enable_irq(irq); | |
149 | } | |
150 | ||
e8106b94 AV |
151 | static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags) |
152 | { | |
153 | #ifdef CONFIG_LOCKDEP | |
154 | local_irq_restore(*flags); | |
155 | #endif | |
156 | enable_irq(irq); | |
157 | } | |
158 | ||
/* IRQ wakeup (PM) control: */
extern int set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 0);
}

#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator, need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
# ifndef CONFIG_LOCKDEP
#  define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
#  define disable_irq_lockdep(irq)		disable_irq(irq)
#  define enable_irq_lockdep(irq)		enable_irq(irq)
# endif

#endif /* CONFIG_GENERIC_HARDIRQS */

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/*
 * Temporary defines for UP kernels, until all code gets fixed.
 */
#ifndef CONFIG_SMP
static inline void __deprecated cli(void)
{
	local_irq_disable();
}
static inline void __deprecated sti(void)
{
	local_irq_enable();
}
static inline void __deprecated save_flags(unsigned long *x)
{
	local_save_flags(*x);
}
#define save_flags(x) save_flags(&x)
static inline void __deprecated restore_flags(unsigned long x)
{
	local_irq_restore(x);
}

static inline void __deprecated save_and_cli(unsigned long *x)
{
	local_irq_save(*x);
}
#define save_and_cli(x)	save_and_cli(&x)
#endif /* CONFIG_SMP */

extern void local_bh_disable(void);
extern void __local_bh_enable(void);
extern void _local_bh_enable(void);
extern void local_bh_enable(void);
extern void local_bh_enable_ip(unsigned long ip);

/* PLEASE avoid allocating new softirqs unless you _really_ need
   high-frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough; e.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */
231 | ||
232 | enum | |
233 | { | |
234 | HI_SOFTIRQ=0, | |
235 | TIMER_SOFTIRQ, | |
236 | NET_TX_SOFTIRQ, | |
237 | NET_RX_SOFTIRQ, | |
ff856bad | 238 | BLOCK_SOFTIRQ, |
1da177e4 LT |
239 | TASKLET_SOFTIRQ |
240 | }; | |
241 | ||
242 | /* softirq mask and active fields moved to irq_cpustat_t in | |
243 | * asm/hardirq.h to get better cache usage. KAO | |
244 | */ | |
245 | ||
246 | struct softirq_action | |
247 | { | |
248 | void (*action)(struct softirq_action *); | |
249 | void *data; | |
250 | }; | |
251 | ||
252 | asmlinkage void do_softirq(void); | |
253 | extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data); | |
254 | extern void softirq_init(void); | |
3f74478b | 255 | #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) |
1da177e4 LT |
256 | extern void FASTCALL(raise_softirq_irqoff(unsigned int nr)); |
257 | extern void FASTCALL(raise_softirq(unsigned int nr)); | |
258 | ||
259 | ||
/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a
   tasklet runs on at most one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not
     yet started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule
     is called from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets. If a client needs inter-tasklet
     synchronization, it must provide it with spinlocks.
 */
279 | ||
280 | struct tasklet_struct | |
281 | { | |
282 | struct tasklet_struct *next; | |
283 | unsigned long state; | |
284 | atomic_t count; | |
285 | void (*func)(unsigned long); | |
286 | unsigned long data; | |
287 | }; | |
288 | ||
289 | #define DECLARE_TASKLET(name, func, data) \ | |
290 | struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } | |
291 | ||
292 | #define DECLARE_TASKLET_DISABLED(name, func, data) \ | |
293 | struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } | |
294 | ||
295 | ||
296 | enum | |
297 | { | |
298 | TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ | |
299 | TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ | |
300 | }; | |
301 | ||
302 | #ifdef CONFIG_SMP | |
303 | static inline int tasklet_trylock(struct tasklet_struct *t) | |
304 | { | |
305 | return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); | |
306 | } | |
307 | ||
308 | static inline void tasklet_unlock(struct tasklet_struct *t) | |
309 | { | |
310 | smp_mb__before_clear_bit(); | |
311 | clear_bit(TASKLET_STATE_RUN, &(t)->state); | |
312 | } | |
313 | ||
314 | static inline void tasklet_unlock_wait(struct tasklet_struct *t) | |
315 | { | |
316 | while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } | |
317 | } | |
318 | #else | |
319 | #define tasklet_trylock(t) 1 | |
320 | #define tasklet_unlock_wait(t) do { } while (0) | |
321 | #define tasklet_unlock(t) do { } while (0) | |
322 | #endif | |
323 | ||
324 | extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t)); | |
325 | ||
326 | static inline void tasklet_schedule(struct tasklet_struct *t) | |
327 | { | |
328 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | |
329 | __tasklet_schedule(t); | |
330 | } | |
331 | ||
332 | extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t)); | |
333 | ||
334 | static inline void tasklet_hi_schedule(struct tasklet_struct *t) | |
335 | { | |
336 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | |
337 | __tasklet_hi_schedule(t); | |
338 | } | |
339 | ||
340 | ||
341 | static inline void tasklet_disable_nosync(struct tasklet_struct *t) | |
342 | { | |
343 | atomic_inc(&t->count); | |
344 | smp_mb__after_atomic_inc(); | |
345 | } | |
346 | ||
347 | static inline void tasklet_disable(struct tasklet_struct *t) | |
348 | { | |
349 | tasklet_disable_nosync(t); | |
350 | tasklet_unlock_wait(t); | |
351 | smp_mb(); | |
352 | } | |
353 | ||
354 | static inline void tasklet_enable(struct tasklet_struct *t) | |
355 | { | |
356 | smp_mb__before_atomic_dec(); | |
357 | atomic_dec(&t->count); | |
358 | } | |
359 | ||
360 | static inline void tasklet_hi_enable(struct tasklet_struct *t) | |
361 | { | |
362 | smp_mb__before_atomic_dec(); | |
363 | atomic_dec(&t->count); | |
364 | } | |
365 | ||
366 | extern void tasklet_kill(struct tasklet_struct *t); | |
367 | extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); | |
368 | extern void tasklet_init(struct tasklet_struct *t, | |
369 | void (*func)(unsigned long), unsigned long data); | |
370 | ||
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows (see also the
 * sketch after this comment):
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#endif /* _LINUX_INTERRUPT_H */
418 | #endif |