/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nesting interrupts. Though
 * there are a few palaeontologic drivers which reenable interrupts in
 * the handler, so we need more than one bit here.
 *
 *         PREEMPT_MASK:	0x000000ff
 *         SOFTIRQ_MASK:	0x0000ff00
 *         HARDIRQ_MASK:	0x000f0000
 *             NMI_MASK:	0x00f00000
 * PREEMPT_NEED_RESCHED:	0x80000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	4

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS)     << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

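/*
 * Worked example (illustrative; the numbers follow from the definitions
 * above, assuming a !CONFIG_PREEMPT_RT kernel): the softirq field is counted
 * in two different units so that "serving a softirq" (SOFTIRQ_OFFSET, 0x100)
 * and "BHs disabled" (SOFTIRQ_DISABLE_OFFSET, 0x200) can be told apart
 * within the same byte:
 *
 *	local_bh_disable();	// preempt_count() += 0x200
 *	local_bh_disable();	// preempt_count() += 0x200 (nesting is fine)
 *	local_bh_enable();	// preempt_count() -= 0x200
 *	local_bh_enable();	// preempt_count() -= 0x200
 *
 * whereas softirq processing itself accounts SOFTIRQ_OFFSET, which is what
 * in_serving_softirq() further down tests for.
 */
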
#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/*
 * Disable preemption until the scheduler is running -- use an unconditional
 * value so that it also works on !PREEMPT_COUNT kernels.
 *
 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
 */
#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET

/*
 * Initial preempt_count value; reflects the preempt_count schedule invariant
 * which states that during context switches:
 *
 *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
 *
 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
 * Note: See finish_task_switch().
 */
#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>

#define nmi_count()		(preempt_count() & NMI_MASK)
#define hardirq_count()		(preempt_count() & HARDIRQ_MASK)
#ifdef CONFIG_PREEMPT_RT
# define softirq_count()	(current->softirq_disable_cnt & SOFTIRQ_MASK)
#else
# define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#endif
#define irq_count()		(nmi_count() | hardirq_count() | softirq_count())

/*
 * Macros to retrieve the current execution context:
 *
 * in_nmi()		- We're in NMI context
 * in_hardirq()		- We're in hard IRQ context
 * in_serving_softirq()	- We're in softirq context
 * in_task()		- We're in task context
 */
#define in_nmi()		(nmi_count())
#define in_hardirq()		(hardirq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
#define in_task()		(!(in_nmi() | in_hardirq() | in_serving_softirq()))

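/*
 * Illustrative sketch: code that can be reached from both task and interrupt
 * context sometimes branches on these helpers, e.g.
 *
 *	if (in_task())
 *		buf = kmalloc(len, GFP_KERNEL);	// task context, may sleep
 *	else
 *		buf = kmalloc(len, GFP_ATOMIC);	// IRQ/softirq, must not sleep
 *
 * though the preferred style is usually for callers to pass the right GFP
 * flags down explicitly rather than probing the context (in_task() alone
 * does not prove that sleeping is safe).
 */
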
/*
 * The following macros are deprecated and should not be used in new code:
 * in_irq()       - Obsolete version of in_hardirq()
 * in_softirq()   - We have BH disabled, or are processing softirqs
 * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())

/*
 * The preempt_count offset after preempt_disable();
 */
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET	0
#endif

/*
 * The preempt_count offset after spin_lock()
 */
#if !defined(CONFIG_PREEMPT_RT)
#define PREEMPT_LOCK_OFFSET		PREEMPT_DISABLE_OFFSET
#else
#define PREEMPT_LOCK_OFFSET		0
#endif

/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences of:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * Work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)

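/*
 * Worked example of the above (assuming CONFIG_PREEMPT_COUNT=y and
 * CONFIG_PREEMPT_RT=n, so PREEMPT_LOCK_OFFSET == 1 and
 * SOFTIRQ_LOCK_OFFSET == 0x201):
 *
 *	spin_lock_bh(&lock);	// preempt_count() += 0x201
 *	...
 *	spin_unlock(&lock);	// preempt_count() -= 0x001
 *	local_bh_enable();	// preempt_count() -= 0x200
 *
 * i.e. the combined lock offset can be dropped in two independent steps.
 */
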
/*
 * Are we running in atomic context?  WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels.  Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	(preempt_count() != 0)

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preemptible()	(preempt_count() == 0 && !irqs_disabled())

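/*
 * Typical usage (illustrative sketch; my_percpu_counter is a made-up
 * DEFINE_PER_CPU() variable, not something defined in this header):
 *
 *	preempt_disable();
 *	// no other task can run on this CPU until preempt_enable()
 *	__this_cpu_inc(my_percpu_counter);
 *	preempt_enable();
 *
 * Note the barrier() placement in the macros above: the count is raised
 * before the compiler barrier on disable and dropped after it on enable,
 * so memory accesses cannot be reordered out of the protected region.
 */
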
#ifdef CONFIG_PREEMPTION
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_notrace(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPTION */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#define preempt_check_resched()		do { } while (0)
#endif /* CONFIG_PREEMPTION */

#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
#define preemptible()				0

#endif /* CONFIG_PREEMPT_COUNT */

#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}

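/*
 * Usage sketch (hypothetical; the my_vcpu/my_* names are invented for
 * illustration).  The notifier is normally embedded in a larger per-task
 * object and the callbacks recover it with container_of():
 *
 *	struct my_vcpu {
 *		struct preempt_notifier pn;
 *		// ... vcpu state ...
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		struct my_vcpu *v = container_of(pn, struct my_vcpu, pn);
 *		// reload per-CPU state for v; IRQs are enabled here
 *	}
 *
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next)
 *	{
 *		struct my_vcpu *v = container_of(pn, struct my_vcpu, pn);
 *		// save state; called with rq lock held and IRQs disabled
 *	}
 *
 *	static struct preempt_ops my_preempt_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 *
 *	preempt_notifier_init(&v->pn, &my_preempt_ops);
 *	preempt_notifier_register(&v->pn);	// registers for current
 */
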
#endif

#ifdef CONFIG_SMP

/*
 * Migrate-Disable and why it is undesired.
 *
 * When a preempted task becomes eligible to run under the ideal model (IOW it
 * becomes one of the M highest priority tasks), it might still have to wait
 * for the preemptee's migrate_disable() section to complete, thereby suffering
 * a reduction in bandwidth for the exact duration of the migrate_disable()
 * section.
 *
 * Per this argument, the change from preempt_disable() to migrate_disable()
 * gets us:
 *
 * - a higher priority task gains reduced wake-up latency; with preempt_disable()
 *   it would have had to wait for the lower priority task.
 *
 * - a lower priority task, which under preempt_disable() could've instantly
 *   migrated away when another CPU became available, is now constrained
 *   by the ability to push the higher priority task away, which might itself be
 *   in a migrate_disable() section, reducing its available bandwidth.
 *
 * IOW it trades latency / moves the interference term, but the interference
 * stays in the system, and as long as it remains unbounded, the system is not
 * fully deterministic.
 *
 *
 * The reason we have it anyway.
 *
 * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
 * number of primitives into becoming preemptible, they would also allow
 * migration. This turns out to break a bunch of per-cpu usage. To this end,
 * all these primitives employ migrate_disable() to restore this implicit
 * assumption.
 *
 * This is a 'temporary' work-around at best. The correct solution is getting
 * rid of the above assumptions and reworking the code to employ explicit
 * per-cpu locking or short preempt-disable regions.
 *
 * The end goal must be to get rid of migrate_disable(); alternatively we need
 * a schedulability theory that does not depend on arbitrary migration.
 *
 *
 * Notes on the implementation.
 *
 * The implementation is particularly tricky since existing code patterns
 * dictate that neither migrate_disable() nor migrate_enable() is allowed to
 * block. This means that it cannot use cpus_read_lock() to serialize against
 * hotplug, nor can it easily migrate itself into a pending affinity mask
 * change on migrate_enable().
 *
 *
 * Note: even non-work-conserving schedulers like semi-partitioned ones depend
 *       on migration, so migrate_disable() is not only a problem for
 *       work-conserving schedulers.
 *
 */
extern void migrate_disable(void);
extern void migrate_enable(void);

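/*
 * Rough usage sketch (illustrative only): unlike preempt_disable(),
 * migrate_disable() pins the task to its current CPU while leaving it
 * preemptible, so sleeping locks remain usable (which matters on
 * PREEMPT_RT):
 *
 *	migrate_disable();
 *	// still preemptible, but guaranteed to stay on this CPU
 *	cpu = smp_processor_id();
 *	// ... work with this CPU's data, possibly taking sleeping locks ...
 *	migrate_enable();
 */
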
#else

static inline void migrate_disable(void) { }
static inline void migrate_enable(void) { }

#endif /* CONFIG_SMP */

#endif /* __LINUX_PREEMPT_H */