#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nesting interrupts. Though
 * there are a few palaeontologic drivers which reenable interrupts in
 * the handler, so we need more than one bit here.
 *
 *         PREEMPT_MASK:	0x000000ff
 *         SOFTIRQ_MASK:	0x0000ff00
 *         HARDIRQ_MASK:	0x000f0000
 *             NMI_MASK:	0x00100000
 * PREEMPT_NEED_RESCHED:	0x80000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	1

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

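/*
 * Worked example (illustrative comment, not part of the original header):
 * a preempt_count() of 0x00010102 decomposes with the masks above as
 *
 *	(0x00010102 & HARDIRQ_MASK) >> HARDIRQ_SHIFT == 1	hardirq nesting
 *	(0x00010102 & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT == 1	serving a softirq
 *	(0x00010102 & PREEMPT_MASK) >> PREEMPT_SHIFT == 2	two preempt_disable()s
 *
 * i.e. a hardirq arrived while a softirq handler held two nested levels of
 * preempt_disable() (e.g. nested spin_lock()s on a PREEMPT kernel).
 */
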
/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED	0x80000000

#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/*
 * Disable preemption until the scheduler is running -- use an unconditional
 * value so that it also works on !PREEMPT_COUNT kernels.
 *
 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
 */
#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET

/*
 * Initial preempt_count value; reflects the preempt_count schedule invariant
 * which states that during context switches:
 *
 *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
 *
 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
 * Note: See finish_task_switch().
 */
#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

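/*
 * Illustrative arithmetic (comment only), assuming the asm-generic
 * definition PREEMPT_ENABLED == 0: with CONFIG_PREEMPT_COUNT,
 * PREEMPT_DISABLE_OFFSET == 1, so FORK_PREEMPT_COUNT == 2 -- one count for
 * the scheduler's own preempt_disable() and one for the rq lock held across
 * the switch. Without CONFIG_PREEMPT_COUNT both offsets vanish. Some
 * architectures (e.g. x86) fold PREEMPT_NEED_RESCHED into PREEMPT_ENABLED
 * because they store the resched flag inverted.
 */
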
/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 *
 * in_irq()             - We're in (hard) IRQ context
 * in_softirq()         - We have BH disabled, or are processing softirqs
 * in_interrupt()       - We're in NMI, IRQ or SoftIRQ context, or have BH disabled
 * in_serving_softirq() - We're in softirq context
 * in_nmi()             - We're in NMI context
 * in_task()            - We're in task context
 *
 * Note: due to the BH-disabled ambiguity, in_softirq() and in_interrupt()
 * really should not be used in new code.
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
#define in_nmi()		(preempt_count() & NMI_MASK)
#define in_task()		(!(preempt_count() & \
				   (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))

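/*
 * Illustrative usage (hypothetical code, not part of this header): pick an
 * allocation mode based on the execution context. Note that in_task() alone
 * does not prove sleeping is safe -- locks may still be held -- it only
 * rules out NMI/hardirq/softirq context.
 *
 *	gfp_t flags = in_task() ? GFP_KERNEL : GFP_ATOMIC;
 *	void *buf = kmalloc(size, flags);
 */
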
/*
 * The preempt_count offset after preempt_disable();
 */
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET	0
#endif

/*
 * The preempt_count offset after spin_lock()
 */
#define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET

/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences of:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * Work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET	(SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)

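/*
 * Illustrative arithmetic (comment only), assuming CONFIG_PREEMPT_COUNT:
 * SOFTIRQ_LOCK_OFFSET == SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET
 *                     == 0x200 + 0x1 == 0x201.
 * spin_lock_bh() raises preempt_count() by 0x201 in one go; the unlock
 * sequence above takes it back in two steps: spin_unlock() drops the 0x1,
 * local_bh_enable() drops the 0x200.
 */
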
/*
 * Are we running in atomic context? WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels. Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	(preempt_count() != 0)
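
/*
 * Illustrative caveat (comment only): on a !CONFIG_PREEMPT_COUNT kernel,
 * the preempt_disable() inside spin_lock() is a bare barrier() and does
 * not touch the counter, so
 *
 *	spin_lock(&lock);
 *	// in_atomic() still reads 0 here, despite the held lock
 *
 * which is exactly why in_atomic() must not be used in driver code.
 */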

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off()	(preempt_count() != PREEMPT_DISABLE_OFFSET)

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preemptible()	(preempt_count() == 0 && !irqs_disabled())

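/*
 * Illustrative usage (hypothetical code, not part of this header): keep a
 * task on its current CPU across several per-CPU accesses. The barrier()s
 * in the definitions above stop the compiler from moving the accesses
 * outside the region.
 *
 *	preempt_disable();
 *	p = this_cpu_ptr(&my_percpu_state);	// my_percpu_state: hypothetical
 *	p->hits++;
 *	p->last_jiffies = jiffies;
 *	preempt_enable();
 */
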
#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_notrace(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPT */

#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
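/*
 * Illustrative hazard (comment only): if these were empty macros rather
 * than barrier()s, the compiler could reorder memory accesses across them:
 *
 *	preempt_disable();
 *	data = shared_state;	// could otherwise be hoisted out of
 *	preempt_enable();	// the preempt-protected region
 *
 * barrier() forbids such movement even when no count is maintained.
 */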
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
#define preemptible()				0

#endif /* CONFIG_PREEMPT_COUNT */

#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)

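/*
 * Note on the fold above (descriptive comment, added for clarity): folding
 * TIF_NEED_RESCHED into the PREEMPT_NEED_RESCHED bit of preempt_count lets
 * architectures that support it test "count reached zero and a resched is
 * needed" with a single decrement-and-test in preempt_enable(), instead of
 * two separate checks.
 */
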
#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts. sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled. This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}

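/*
 * Illustrative usage (hypothetical code, not part of this header): embed a
 * struct preempt_notifier in your own object and recover it with
 * container_of() from the callbacks.
 *
 *	struct my_ctx {
 *		struct preempt_notifier notifier;
 *		int last_cpu;
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		struct my_ctx *ctx = container_of(pn, struct my_ctx, notifier);
 *		ctx->last_cpu = cpu;
 *	}
 *
 *	// pair with preempt_notifier_init(&ctx->notifier, &my_ops) and
 *	// preempt_notifier_register(&ctx->notifier)
 */
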
#endif

#endif /* __LINUX_PREEMPT_H */