/* SPDX-License-Identifier: GPL-2.0 */
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sections.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

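/*
 * Example (illustrative sketch, not part of this header): requesting a
 * falling-edge interrupt. The device structure and the helpers used by
 * the handler below are hypothetical.
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_is_ours(foo))
 *			return IRQ_NONE;
 *		foo_ack_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_irq(foo->irq, foo_irq_handler, IRQF_TRIGGER_FALLING,
 *			  "foo", foo);
 *
 * Passing no IRQF_TRIGGER_* bit leaves the line configured as already
 * set up by firmware or platform code.
 */
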
/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state. See Documentation/power/suspend-and-interrupts.txt
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                     resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                     interrupt handler after suspending interrupts. For system
 *                     wakeup devices users need to implement wakeup detection in
 *                     their interrupt handlers.
 */
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000
#define IRQF_COND_SUSPEND	0x00040000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

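/*
 * Example (illustrative sketch, not part of this header): a shared,
 * oneshot threaded request of the kind many I2C device drivers make;
 * the names below are hypothetical. With a NULL primary handler on a
 * level-triggered line, IRQF_ONESHOT is mandatory: it keeps the line
 * masked until bar_thread_fn() returns, preventing an interrupt storm.
 *
 *	ret = request_threaded_irq(client->irq, NULL, bar_thread_fn,
 *				   IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_LOW,
 *				   "bar", bar);
 */
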
/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @secondary:	pointer to secondary irqaction (force threading)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	struct irqaction	*secondary;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED	(1U << 31)

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

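/*
 * Example (illustrative sketch, not part of this header): splitting work
 * between a primary and a threaded handler; all names are hypothetical.
 * The primary handler runs in hardirq context, quickly silences the
 * device and returns IRQ_WAKE_THREAD so that baz_thread() runs in a
 * kernel thread, where it may sleep (take mutexes, do I2C/SPI I/O).
 *
 *	static irqreturn_t baz_quick(int irq, void *dev_id)
 *	{
 *		struct baz *baz = dev_id;
 *
 *		if (!baz_pending(baz))
 *			return IRQ_NONE;
 *		baz_mask_device(baz);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t baz_thread(int irq, void *dev_id)
 *	{
 *		struct baz *baz = dev_id;
 *
 *		baz_handle_events(baz);		// may sleep
 *		baz_unmask_device(baz);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(baz->irq, baz_quick, baz_thread, 0,
 *				   "baz", baz);
 */
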
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
		     unsigned long flags, const char *devname,
		     void __percpu *percpu_dev_id);

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return __request_percpu_irq(irq, handler, 0,
				    devname, percpu_dev_id);
}

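/*
 * Example (illustrative sketch, not part of this header): per-cpu IRQs
 * (e.g. an architected per-cpu timer) take a __percpu cookie and must
 * additionally be enabled on each CPU that should receive them; the
 * names below are hypothetical.
 *
 *	static DEFINE_PER_CPU(struct qux_timer, qux_timers);
 *
 *	ret = request_percpu_irq(irq, qux_timer_handler, "qux_timer",
 *				 &qux_timers);
 *
 *	// later, on each CPU (e.g. from a CPU hotplug online callback):
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */
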
extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

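/*
 * Example (illustrative sketch, not part of this header): the devm_
 * variants tie the lifetime of the request to @dev, so probe() needs
 * no matching free_irq() on its error or remove paths; the driver
 * names below are hypothetical.
 *
 *	static int quux_probe(struct platform_device *pdev)
 *	{
 *		...
 *		ret = devm_request_irq(&pdev->dev, irq, quux_handler, 0,
 *				       dev_name(&pdev->dev), quux);
 *		if (ret)
 *			return ret;	// nothing to unwind for the IRQ
 *		...
 *	}
 */
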
/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

/* The following two functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:		Interrupt to which notification applies
 * @kref:		Reference count, for internal use
 * @work:		Work item, for internal use
 * @notify:		Function to be called on change. This will be
 *			called in process context.
 * @release:		Function to be called on release. This will be
 *			called in process context. Once registered, the
 *			structure must only be freed when this function is
 *			called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

/**
 * struct irq_affinity - Description for automatic irq affinity assignments
 * @pre_vectors:	Don't apply affinity to @pre_vectors at beginning of
 *			the MSI(-X) vector space
 * @post_vectors:	Don't apply affinity to @post_vectors at end of
 *			the MSI(-X) vector space
 * @nr_sets:		Length of the passed-in *sets array
 * @sets:		Number of affinitized sets
 */
struct irq_affinity {
	int	pre_vectors;
	int	post_vectors;
	int	nr_sets;
	int	*sets;
};

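/*
 * Example (illustrative sketch, not part of this header): a PCI driver
 * reserving one non-affinitized vector at each end of its MSI-X space
 * (e.g. admin and error/event interrupts) while the remaining vectors
 * are spread across CPUs; consumed via pci_alloc_irq_vectors_affinity().
 * The local variable names are hypothetical.
 *
 *	struct irq_affinity affd = {
 *		.pre_vectors	= 1,	// admin queue vector
 *		.post_vectors	= 1,	// error/event vector
 *	};
 *
 *	nvecs = pci_alloc_irq_vectors_affinity(pdev, minvec, maxvec,
 *					       PCI_IRQ_MSIX, &affd);
 */
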
/**
 * struct irq_affinity_desc - Interrupt affinity descriptor
 * @mask:	cpumask to hold the affinity assignment
 * @is_managed: 1 if the interrupt is managed internally
 */
struct irq_affinity_desc {
	struct cpumask	mask;
	unsigned int	is_managed : 1;
};

#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

/* Internal implementation. Use the helpers below */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
			      bool force);

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}

/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

struct irq_affinity_desc *
irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);

int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}

static inline struct irq_affinity_desc *
irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
{
	return NULL;
}

static inline int
irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
{
	return maxvec;
}

#endif /* CONFIG_SMP */

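/*
 * Example (illustrative sketch, not part of this header): a multi-queue
 * NIC driver hinting that queue vector i is best serviced near CPU i,
 * and registering a notifier to learn when userspace rebalances the
 * IRQ; the structure and helper names are hypothetical.
 *
 *	irq_set_affinity_hint(vec_irq, cpumask_of(i));
 *
 *	static void nic_affinity_changed(struct irq_affinity_notify *notify,
 *					 const cpumask_t *mask)
 *	{
 *		struct nic_queue *q = container_of(notify, struct nic_queue,
 *						   affinity_notify);
 *
 *		nic_rebalance_queue(q, mask);	// hypothetical helper
 *	}
 *
 *	q->affinity_notify.notify = nic_affinity_changed;
 *	q->affinity_notify.release = nic_affinity_release;
 *	irq_set_affinity_notifier(vec_irq, &q->affinity_notify);
 */
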
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that
 * know that a particular irq context is disabled,
 * and is the only irq-context user of a lock,
 * so that it's safe to take the lock in the irq-disabled
 * section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}

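/*
 * Example (illustrative sketch, not part of this header): marking a
 * device's IRQ as a system wakeup source across suspend; the driver
 * names are hypothetical and error handling is elided.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */
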
/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);

#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif

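/*
 * Example (illustrative sketch, not part of this header): checking
 * whether an interrupt is still pending at the irqchip level and, if
 * so, clearing it, as hypervisor or self-test code might do.
 *
 *	bool pending;
 *
 *	if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending) &&
 *	    pending)
 *		irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
 */
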
#ifndef local_softirq_pending

#ifndef local_softirq_pending_ref
#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif

#define local_softirq_pending()	(__this_cpu_read(local_softirq_pending_ref))
#define set_softirq_pending(x)	(__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x)	(__this_cpu_or(local_softirq_pending_ref, (x)))

#endif /* local_softirq_pending */

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif

/* PLEASE avoid allocating new softirqs unless you _really_ need
   high frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,	/* Unused, but kept as tools rely on the
				   numbering. Sigh! */
	RCU_SOFTIRQ,		/* Preferably, RCU should always be the last softirq */

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

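/*
 * Example (illustrative sketch, not part of this header, modeled on the
 * networking stack's use of NET_TX_SOFTIRQ): a subsystem wires up its
 * action once at init and raises the softirq from hardirq context. The
 * action body is hypothetical; the slot must be one of the fixed enum
 * entries above — softirqs cannot be allocated dynamically.
 *
 *	static void net_tx_action(struct softirq_action *h)
 *	{
 *		// drain per-cpu completion/output queues; cannot sleep
 *	}
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);	// once, at init
 *
 *	// from a hardirq handler, with interrupts already disabled:
 *	__raise_softirq_irqoff(NET_TX_SOFTIRQ);
 */
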
/* Tasklets --- multithreaded analogue of BHs.

   The main feature differing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   The main feature differing them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If a client needs some intertask synchronization,
     it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }

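/*
 * Example (illustrative sketch, not part of this header): initializing
 * a tasklet at runtime and scheduling it from a hardirq handler; names
 * are hypothetical. The func callback runs in softirq context and must
 * not sleep.
 *
 *	static void foo_do_tasklet(unsigned long data)
 *	{
 *		struct foo *foo = (struct foo *)data;
 *
 *		foo_process_completions(foo);
 *	}
 *
 *	tasklet_init(&foo->tasklet, foo_do_tasklet, (unsigned long)foo);
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct foo *foo = dev_id;
 *
 *		foo_ack_irq(foo);
 *		tasklet_schedule(&foo->tasklet);
 *		return IRQ_HANDLED;
 *	}
 */
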
enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			   const enum hrtimer_mode mode)
{
	hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}

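/*
 * Example (illustrative sketch, not part of this header): deferring
 * hrtimer expiry work into softirq context; names are hypothetical.
 * The deferred callback runs as a tasklet, so it still must not sleep,
 * but it no longer runs in hard interrupt context.
 *
 *	static enum hrtimer_restart foo_timeout(struct hrtimer *t)
 *	{
 *		foo_poll_hardware();
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&foo->ttimer, foo_timeout,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&foo->ttimer, ms_to_ktime(10),
 *			      HRTIMER_MODE_REL);
 */
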
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

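/*
 * A sketch of the recipe above (illustrative, not part of this header);
 * the device helpers are hypothetical:
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	foo_mask_device_irq(foo);		// step 1
 *	irqs = probe_irq_on();			// step 3
 *	foo_unmask_and_trigger_irq(foo);	// step 4
 *	mdelay(10);				// step 5
 *	irq = probe_irq_off(irqs);		// step 6
 *	foo_ack_irq(foo);			// step 7
 *	if (irq <= 0)
 *		return -ENODEV;			// none, or multiple fired
 */
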
#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#ifdef CONFIG_IRQ_TIMINGS
void irq_timings_enable(void);
void irq_timings_disable(void);
u64 irq_timings_next_event(u64 now);
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

/*
 * We want to know which function is an entrypoint of a hardirq or a softirq.
 */
#define __irq_entry		__attribute__((__section__(".irqentry.text")))
#define __softirq_entry	\
	__attribute__((__section__(".softirqentry.text")))

#endif