/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>

#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED. This flag is a NOOP and scheduled to be removed.
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000

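/*
 * Example (illustrative sketch, not part of this header): a driver for a
 * hypothetical device "foo" sharing a level-triggered line would combine
 * the flags above when requesting its interrupt; foo_isr and foo_dev are
 * assumed names, not kernel symbols:
 *
 *	int err = request_irq(irq, foo_isr,
 *			      IRQF_SHARED | IRQF_TRIGGER_LOW,
 *			      "foo", foo_dev);
 *	if (err)
 *		return err;
 *
 * IRQF_SHARED requires a non-NULL dev_id so free_irq() can tell the
 * sharing handlers apart.
 */
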
/*
 * Bits used by threaded handlers:
 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
 * IRQTF_DIED - handler thread died
 * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
 * IRQTF_AFFINITY - irq thread is requested to adjust affinity
 */
enum {
	IRQTF_RUNTHREAD,
	IRQTF_DIED,
	IRQTF_WARNED,
	IRQTF_AFFINITY,
};

/**
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @flags:	flags (see IRQF_* above)
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @dir:	pointer to the proc/irq/NN/name entry
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 */
struct irqaction {
	irq_handler_t handler;
	unsigned long flags;
	const char *name;
	void *dev_id;
	struct irqaction *next;
	int irq;
	struct proc_dir_entry *dir;
	irq_handler_t thread_fn;
	struct task_struct *thread;
	unsigned long thread_flags;
};

extern irqreturn_t no_action(int cpl, void *dev_id);

#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern void exit_irq_thread(void);
#else

extern int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

/*
 * Special function to avoid ifdeffery in kernel/irq/devres.c which
 * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
 * m68k). I really love these $@%#!* obvious Makefile references:
 * ../../../kernel/irq/devres.o
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev)
{
	return request_irq(irq, handler, flags, name, dev);
}

static inline int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id)
{
	return request_irq(irq, handler, flags, name, dev_id);
}

static inline void exit_irq_thread(void) { }
#endif

extern void free_irq(unsigned int, void *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);

/* The following three functions are for the core kernel use only. */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#else
static inline void suspend_device_irqs(void) { }
static inline void resume_device_irqs(void) { }
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */

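/*
 * Example (illustrative sketch, not part of this header): pinning an
 * interrupt to CPU 2, guarded by irq_can_set_affinity() since not every
 * irq chip supports affinity changes:
 *
 *	if (irq_can_set_affinity(irq))
 *		irq_set_affinity(irq, cpumask_of(2));
 */
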
#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that
 * know that a particular irq context is disabled,
 * and which are the only irq-context users of a lock,
 * so that it's safe to take the lock in the irq-disabled
 * section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 0);
}

#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator, need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
# define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
# define disable_irq_lockdep(irq)		disable_irq(irq)
# define enable_irq_lockdep(irq)		enable_irq(irq)
# define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
#endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
#endif /* CONFIG_GENERIC_HARDIRQS */

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif

/* PLEASE, avoid allocating new softirqs unless you really need
   high-frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern char *softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
extern void wakeup_softirqd(void);

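/*
 * Example (illustrative sketch, not part of this header; the comment
 * above deliberately discourages adding new softirqs, this only shows
 * the API shape). A subsystem registers its handler once at init time
 * and raises it later, typically from hardirq context. FOO_SOFTIRQ and
 * foo_softirq_action are hypothetical, assuming FOO_SOFTIRQ were added
 * to the enum above:
 *
 *	static void foo_softirq_action(struct softirq_action *a)
 *	{
 *		// process this cpu's pending work
 *	}
 *
 *	open_softirq(FOO_SOFTIRQ, foo_softirq_action);
 *	...
 *	raise_softirq(FOO_SOFTIRQ);	// mark pending on this cpu
 */
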
/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them. The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

/* Try to send a softirq to a remote cpu. If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);

/* Tasklets --- multithreaded analogue of BHs.

   Main feature distinguishing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   Main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets. If a client needs some inter-tasklet
     synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }


enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);

struct irq_desc;

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
extern int arch_init_chip_data(struct irq_desc *desc, int node);

#endif