/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>

struct irqaction {
	irqreturn_t (*handler)(int, void *, struct pt_regs *);
	unsigned long flags;
	cpumask_t mask;
	const char *name;
	void *dev_id;
	struct irqaction *next;
	int irq;
	struct proc_dir_entry *dir;
};
27 | ||
28 | extern irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs); | |
29 | extern int request_irq(unsigned int, | |
30 | irqreturn_t (*handler)(int, void *, struct pt_regs *), | |
31 | unsigned long, const char *, void *); | |
32 | extern void free_irq(unsigned int, void *); | |
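
/*
 * Illustrative sketch, not part of this header: a typical driver-side
 * request_irq()/free_irq() pair.  The "foo" names, the SA_SHIRQ flag
 * choice and the helpers foo_irq_pending()/foo_ack_irq() are made-up
 * placeholders for whatever the real hardware requires.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id,
 *					 struct pt_regs *regs)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;	// shared line, not ours
 *		foo_ack_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	// dev_id must uniquely identify this handler on a shared line;
 *	// the same cookie is passed back to free_irq() on teardown.
 *	if (request_irq(foo->irq, foo_interrupt, SA_SHIRQ, "foo", foo))
 *		return -EBUSY;
 *	...
 *	free_irq(foo->irq, foo);
 */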
33 | ||
34 | ||
35 | #ifdef CONFIG_GENERIC_HARDIRQS | |
36 | extern void disable_irq_nosync(unsigned int irq); | |
37 | extern void disable_irq(unsigned int irq); | |
38 | extern void enable_irq(unsigned int irq); | |
39 | #endif | |
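
/*
 * Illustrative sketch, with a hypothetical device "foo": disable_irq()
 * waits for any handler already running on the line to complete, and
 * calls nest, so every disable_irq() must be balanced by enable_irq().
 * disable_irq_nosync() returns without waiting, which matters if the
 * caller holds a lock the handler might take.
 *
 *	disable_irq(foo->irq);		// line masked, handler finished
 *	foo_reprogram_hardware(foo);	// safe: handler cannot run now
 *	enable_irq(foo->irq);		// unmask (calls must balance)
 */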
40 | ||
3f74478b AK |
41 | #ifndef __ARCH_SET_SOFTIRQ_PENDING |
42 | #define set_softirq_pending(x) (local_softirq_pending() = (x)) | |
43 | #define or_softirq_pending(x) (local_softirq_pending() |= (x)) | |
44 | #endif | |
45 | ||

/*
 * Temporary defines for UP kernels, until all code gets fixed.
 */
#ifndef CONFIG_SMP
static inline void __deprecated cli(void)
{
	local_irq_disable();
}
static inline void __deprecated sti(void)
{
	local_irq_enable();
}
static inline void __deprecated save_flags(unsigned long *x)
{
	local_save_flags(*x);
}
#define save_flags(x) save_flags(&x)
static inline void __deprecated restore_flags(unsigned long x)
{
	local_irq_restore(x);
}

static inline void __deprecated save_and_cli(unsigned long *x)
{
	local_irq_save(*x);
}
#define save_and_cli(x) save_and_cli(&x)
#endif /* CONFIG_SMP */
74 | ||
75 | /* SoftIRQ primitives. */ | |
76 | #define local_bh_disable() \ | |
77 | do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0) | |
78 | #define __local_bh_enable() \ | |
79 | do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0) | |
80 | ||
81 | extern void local_bh_enable(void); | |
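
/*
 * Illustrative sketch: local_bh_disable()/local_bh_enable() bracket a
 * section that must not race with softirqs (or tasklets) on this CPU.
 * The foo_stats counter shared with NET_RX_SOFTIRQ is an assumed
 * example, not real kernel data.
 *
 *	local_bh_disable();	// no softirq can run on this CPU now
 *	foo_stats.rx_packets++;	// safe against softirq-context updates
 *	local_bh_enable();	// pending softirqs may run from here
 */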
82 | ||
83 | /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high | |
84 | frequency threaded job scheduling. For almost all the purposes | |
85 | tasklets are more than enough. F.e. all serial device BHs et | |
86 | al. should be converted to tasklets, not to softirqs. | |
87 | */ | |
88 | ||
89 | enum | |
90 | { | |
91 | HI_SOFTIRQ=0, | |
92 | TIMER_SOFTIRQ, | |
93 | NET_TX_SOFTIRQ, | |
94 | NET_RX_SOFTIRQ, | |
ff856bad | 95 | BLOCK_SOFTIRQ, |
1da177e4 LT |
96 | TASKLET_SOFTIRQ |
97 | }; | |
98 | ||
99 | /* softirq mask and active fields moved to irq_cpustat_t in | |
100 | * asm/hardirq.h to get better cache usage. KAO | |
101 | */ | |
102 | ||
103 | struct softirq_action | |
104 | { | |
105 | void (*action)(struct softirq_action *); | |
106 | void *data; | |
107 | }; | |
108 | ||
109 | asmlinkage void do_softirq(void); | |
110 | extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data); | |
111 | extern void softirq_init(void); | |
3f74478b | 112 | #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) |
1da177e4 LT |
113 | extern void FASTCALL(raise_softirq_irqoff(unsigned int nr)); |
114 | extern void FASTCALL(raise_softirq(unsigned int nr)); | |
115 | ||
116 | ||

/* Tasklets --- multithreaded analogue of BHs.

   Main feature distinguishing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   Main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or if schedule
     is called from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not wrt other
     tasklets.  If the client needs inter-tasklet synchronization, it
     must provide it with spinlocks.
 */
136 | ||
137 | struct tasklet_struct | |
138 | { | |
139 | struct tasklet_struct *next; | |
140 | unsigned long state; | |
141 | atomic_t count; | |
142 | void (*func)(unsigned long); | |
143 | unsigned long data; | |
144 | }; | |
145 | ||
146 | #define DECLARE_TASKLET(name, func, data) \ | |
147 | struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } | |
148 | ||
149 | #define DECLARE_TASKLET_DISABLED(name, func, data) \ | |
150 | struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } | |
151 | ||
152 | ||

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
180 | ||
181 | extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t)); | |
182 | ||
183 | static inline void tasklet_schedule(struct tasklet_struct *t) | |
184 | { | |
185 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | |
186 | __tasklet_schedule(t); | |
187 | } | |
188 | ||
189 | extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t)); | |
190 | ||
191 | static inline void tasklet_hi_schedule(struct tasklet_struct *t) | |
192 | { | |
193 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | |
194 | __tasklet_hi_schedule(t); | |
195 | } | |
196 | ||
197 | ||
198 | static inline void tasklet_disable_nosync(struct tasklet_struct *t) | |
199 | { | |
200 | atomic_inc(&t->count); | |
201 | smp_mb__after_atomic_inc(); | |
202 | } | |
203 | ||
204 | static inline void tasklet_disable(struct tasklet_struct *t) | |
205 | { | |
206 | tasklet_disable_nosync(t); | |
207 | tasklet_unlock_wait(t); | |
208 | smp_mb(); | |
209 | } | |
210 | ||
211 | static inline void tasklet_enable(struct tasklet_struct *t) | |
212 | { | |
213 | smp_mb__before_atomic_dec(); | |
214 | atomic_dec(&t->count); | |
215 | } | |
216 | ||
217 | static inline void tasklet_hi_enable(struct tasklet_struct *t) | |
218 | { | |
219 | smp_mb__before_atomic_dec(); | |
220 | atomic_dec(&t->count); | |
221 | } | |
222 | ||

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *    (A sketch of this sequence follows the declarations below.)
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
255 | ||
256 | #if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE) | |
257 | static inline unsigned long probe_irq_on(void) | |
258 | { | |
259 | return 0; | |
260 | } | |
261 | static inline int probe_irq_off(unsigned long val) | |
262 | { | |
263 | return 0; | |
264 | } | |
265 | static inline unsigned int probe_irq_mask(unsigned long val) | |
266 | { | |
267 | return 0; | |
268 | } | |
269 | #else | |
270 | extern unsigned long probe_irq_on(void); /* returns 0 on failure */ | |
271 | extern int probe_irq_off(unsigned long); /* returns 0 or negative on failure */ | |
272 | extern unsigned int probe_irq_mask(unsigned long); /* returns mask of ISA interrupts */ | |
273 | #endif | |
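
/*
 * Illustrative sketch of the probing sequence from the comment above,
 * for a hypothetical ISA-style device "foo"; the foo_*() helpers stand
 * in for whatever the hardware needs at steps 1, 4 and 7.
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	foo_mask_irq(foo);		// 1. quiesce the device
 *					// 2. interrupts assumed enabled
 *	irqs = probe_irq_on();		// 3. take over idle IRQs
 *	foo_unmask_irq(foo);		// 4. let the device assert its IRQ
 *	foo_trigger_irq(foo);
 *	mdelay(20);			// 5. non-intrusive delay
 *	irq = probe_irq_off(irqs);	// 6. 0 = none, <0 = multiple
 *	foo_ack_irq(foo);		// 7. clear the pending interrupt
 *	if (irq <= 0)
 *		return -ENODEV;		// 8. or loop and retry
 */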
274 | ||
275 | #endif |