#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/thread_info.h>
#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We use the MSB mostly because it's available; see <linux/preempt_mask.h> for
 * the other bits -- can't include that header due to inclusion hell.
 */
#define PREEMPT_NEED_RESCHED	0x80000000

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
	return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
}

static __always_inline int *preempt_count_ptr(void)
{
	return &current_thread_info()->preempt_count;
}

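/*
 * Illustrative sketch, not part of this header: preempt_count() masks off the
 * folded PREEMPT_NEED_RESCHED bit, so existing "non-zero means we cannot
 * preempt" checks are unaffected by it, while preempt_count_ptr() exposes the
 * raw word that also carries the (inverted) resched bit.
 *
 *	preempt_count_set(1 | PREEMPT_NEED_RESCHED); // depth 1, nothing pending
 *	preempt_count();			     // reads 1 either way ...
 *	set_preempt_need_resched();		     // ... even after the bit flips
 *	preempt_count();			     // still 1
 */
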
/*
 * We now lose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
 * alternative is losing a reschedule. Better to schedule too often -- also
 * this should be a very rare operation.
 */
static __always_inline void preempt_count_set(int pc)
{
	*preempt_count_ptr() = pc;
}

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */

static __always_inline void set_preempt_need_resched(void)
{
	*preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
}

static __always_inline void clear_preempt_need_resched(void)
{
	*preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
}

static __always_inline bool test_preempt_need_resched(void)
{
	return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
}

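/*
 * Illustrative sketch, not part of this header: because the bit is inverted,
 * the raw word is 0 only when the disable depth is 0 *and* a resched is
 * pending -- exactly what preempt_check_resched() below tests, and what lets
 * an architecture implement preempt_enable() as a decrement whose zero result
 * is tested directly.
 *
 *	clear_preempt_need_resched();	// bit set: no resched pending
 *	add_preempt_count(1);		// raw word non-zero (depth + bit)
 *	set_preempt_need_resched();	// bit cleared: resched requested
 *	sub_preempt_count(1);		// raw word hits 0: need and can resched
 */
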
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
  extern void add_preempt_count(int val);
  extern void sub_preempt_count(int val);
#else
# define add_preempt_count(val)	do { *preempt_count_ptr() += (val); } while (0)
# define sub_preempt_count(val)	do { *preempt_count_ptr() -= (val); } while (0)
#endif

#define inc_preempt_count() add_preempt_count(1)
#define dec_preempt_count() sub_preempt_count(1)

#ifdef CONFIG_PREEMPT

asmlinkage void preempt_schedule(void);

#define preempt_check_resched() \
do { \
	if (unlikely(!*preempt_count_ptr())) \
		preempt_schedule(); \
} while (0)

#ifdef CONFIG_CONTEXT_TRACKING

void preempt_schedule_context(void);

#define preempt_check_resched_context() \
do { \
	if (unlikely(!*preempt_count_ptr())) \
		preempt_schedule_context(); \
} while (0)
#else

#define preempt_check_resched_context() preempt_check_resched()

#endif /* CONFIG_CONTEXT_TRACKING */

#else /* !CONFIG_PREEMPT */

#define preempt_check_resched()			do { } while (0)
#define preempt_check_resched_context()		do { } while (0)

#endif /* CONFIG_PREEMPT */


#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	inc_preempt_count(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	dec_preempt_count(); \
} while (0)

#define preempt_enable_no_resched()	sched_preempt_enable_no_resched()

#define preempt_enable() \
do { \
	preempt_enable_no_resched(); \
	preempt_check_resched(); \
} while (0)

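/*
 * Illustrative usage sketch, not part of this header (the per-CPU counter is
 * hypothetical): a typical non-atomic critical section against preemption.
 *
 *	preempt_disable();
 *	__this_cpu_inc(my_percpu_counter);	// safe: we cannot migrate here
 *	preempt_enable();			// may call preempt_schedule()
 *						// if the folded count hit 0
 */
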
/* For debugging and tracer internals only! */
#define add_preempt_count_notrace(val)			\
	do { *preempt_count_ptr() += (val); } while (0)
#define sub_preempt_count_notrace(val)			\
	do { *preempt_count_ptr() -= (val); } while (0)
#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)

#define preempt_disable_notrace() \
do { \
	inc_preempt_count_notrace(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	dec_preempt_count_notrace(); \
} while (0)

/* preempt_check_resched is OK to trace */
#define preempt_enable_notrace() \
do { \
	preempt_enable_no_resched_notrace(); \
	preempt_check_resched_context(); \
} while (0)

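/*
 * Illustrative sketch, not part of this header: the _notrace variants are
 * meant for code the tracer itself runs, so that disabling preemption there
 * does not recurse back into the traced add/sub_preempt_count() helpers.
 *
 *	preempt_disable_notrace();
 *	...				// tracer/debug internals
 *	preempt_enable_notrace();
 */
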
#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that things like get_user/put_user, which can cause
 * faults and scheduling, do not migrate into our preempt-protected region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()

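/*
 * Illustrative sketch, not part of this header (the per-CPU variable is
 * hypothetical): even as plain compiler barriers these keep accesses that can
 * fault and schedule, such as get_user(), from being moved by the compiler
 * into the would-be preempt-protected region.
 *
 *	err = get_user(val, uptr);	// may fault and sleep
 *	preempt_disable();		// barrier(): get_user() stays above
 *	__this_cpu_inc(stat);
 *	preempt_enable();		// barrier()
 */
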
#endif /* CONFIG_PREEMPT_COUNT */

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *            notifier: struct preempt_notifier for the task being scheduled
 *            cpu:      cpu we're scheduled on
 * @sched_out: we've just been preempted
 *            notifier: struct preempt_notifier for the task being preempted
 *            next:     the task that's kicking us out
 *
 * Please note that sched_in and sched_out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}

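/*
 * Illustrative sketch, not part of this header (struct my_vcpu, the callbacks
 * and the ops table are hypothetical): embed the notifier in a larger object
 * and recover that object with container_of() from the callbacks.
 *
 *	struct my_vcpu {
 *		struct preempt_notifier pn;
 *		// ... per-vcpu state ...
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		struct my_vcpu *vcpu = container_of(pn, struct my_vcpu, pn);
 *		// reload per-cpu state for vcpu on this cpu
 *	}
 *
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next)
 *	{
 *		struct my_vcpu *vcpu = container_of(pn, struct my_vcpu, pn);
 *		// save state; called with rq lock held and irqs disabled
 *	}
 *
 *	static struct preempt_ops my_preempt_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 *
 *	preempt_notifier_init(&vcpu->pn, &my_preempt_ops);
 *	preempt_notifier_register(&vcpu->pn);
 */
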
#endif /* CONFIG_PREEMPT_NOTIFIERS */

#endif /* __LINUX_PREEMPT_H */