Source: git.proxmox.com Git — mirror_ubuntu-bionic-kernel.git, blame view of include/linux/preempt.h
(at commit "locking: Optimize lock_bh functions"). Blame commit/author annotations follow inline.
1#ifndef __LINUX_PREEMPT_H
2#define __LINUX_PREEMPT_H
3
4/*
5 * include/linux/preempt.h - macros for accessing and manipulating
6 * preempt_count (used for kernel preemption, interrupt count, etc.)
7 */
8
1da177e4 9#include <linux/linkage.h>
e107be36 10#include <linux/list.h>
1da177e4 11
f27dde8d
PZ
12/*
13 * We use the MSB mostly because its available; see <linux/preempt_mask.h> for
14 * the other bits -- can't include that header due to inclusion hell.
15 */
16#define PREEMPT_NEED_RESCHED 0x80000000
17
a7878709 18#include <asm/preempt.h>
f27dde8d 19
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
/*
 * Debug/tracing builds route every count change through out-of-line
 * functions so sanity checks and the preempt-off tracer can hook them.
 */
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(); })
#else
/* Otherwise fall straight through to the arch-optimized primitives. */
#define preempt_count_add(val)		__preempt_count_add(val)
#define preempt_count_sub(val)		__preempt_count_sub(val)
#define preempt_count_dec_and_test()	__preempt_count_dec_and_test()
#endif
/* Convenience wrappers: bump/drop the count by exactly one. */
#define __preempt_count_inc()	__preempt_count_add(1)
#define __preempt_count_dec()	__preempt_count_sub(1)

#define preempt_count_inc()	preempt_count_add(1)
#define preempt_count_dec()	preempt_count_sub(1)
36#ifdef CONFIG_PREEMPT_COUNT
37
1da177e4
LT
38#define preempt_disable() \
39do { \
bdb43806 40 preempt_count_inc(); \
1da177e4
LT
41 barrier(); \
42} while (0)
43
ba74c144 44#define sched_preempt_enable_no_resched() \
1da177e4
LT
45do { \
46 barrier(); \
bdb43806 47 preempt_count_dec(); \
1da177e4
LT
48} while (0)
49
bdb43806 50#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
ba74c144 51
bdb43806 52#ifdef CONFIG_PREEMPT
1da177e4
LT
53#define preempt_enable() \
54do { \
bdb43806
PZ
55 barrier(); \
56 if (unlikely(preempt_count_dec_and_test())) \
1a338ac3 57 __preempt_schedule(); \
1da177e4
LT
58} while (0)
59
bdb43806
PZ
60#define preempt_check_resched() \
61do { \
62 if (should_resched()) \
1a338ac3 63 __preempt_schedule(); \
bdb43806
PZ
64} while (0)
65
66#else
67#define preempt_enable() preempt_enable_no_resched()
68#define preempt_check_resched() do { } while (0)
69#endif
/*
 * _notrace variants: used by the tracing code itself, so they must bypass
 * the traced preempt_count_add/sub wrappers and go straight to the
 * arch-level __preempt_count_* primitives.
 */
#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#ifdef CONFIG_PREEMPT

/* Without context tracking the _context flavor is plain __preempt_schedule. */
#ifndef CONFIG_CONTEXT_TRACKING
#define __preempt_schedule_context() __preempt_schedule()
#endif

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_context(); \
} while (0)
#else
#define preempt_enable_notrace() preempt_enable_no_resched_notrace()
#endif
50282528 98
bdd4e85d 99#else /* !CONFIG_PREEMPT_COUNT */
1da177e4 100
386afc91
LT
101/*
102 * Even if we don't have any preemption, we need preempt disable/enable
103 * to be barriers, so that we don't have things like get_user/put_user
104 * that can cause faults and scheduling migrate into our preempt-protected
105 * region.
106 */
bdb43806 107#define preempt_disable() barrier()
386afc91 108#define sched_preempt_enable_no_resched() barrier()
bdb43806
PZ
109#define preempt_enable_no_resched() barrier()
110#define preempt_enable() barrier()
111#define preempt_check_resched() do { } while (0)
386afc91
LT
112
113#define preempt_disable_notrace() barrier()
114#define preempt_enable_no_resched_notrace() barrier()
115#define preempt_enable_notrace() barrier()
50282528 116
bdd4e85d 117#endif /* CONFIG_PREEMPT_COUNT */
1da177e4 118
e107be36
AK
119#ifdef CONFIG_PREEMPT_NOTIFIERS
120
121struct preempt_notifier;
122
123/**
124 * preempt_ops - notifiers called when a task is preempted and rescheduled
125 * @sched_in: we're about to be rescheduled:
126 * notifier: struct preempt_notifier for the task being scheduled
127 * cpu: cpu we're scheduled on
128 * @sched_out: we've just been preempted
129 * notifier: struct preempt_notifier for the task being preempted
130 * next: the task that's kicking us out
8592e648
TH
131 *
132 * Please note that sched_in and out are called under different
133 * contexts. sched_out is called with rq lock held and irq disabled
134 * while sched_in is called without rq lock and irq enabled. This
135 * difference is intentional and depended upon by its users.
e107be36
AK
136 */
137struct preempt_ops {
138 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
139 void (*sched_out)(struct preempt_notifier *notifier,
140 struct task_struct *next);
141};
142
143/**
144 * preempt_notifier - key for installing preemption notifiers
145 * @link: internal use
146 * @ops: defines the notifier functions to be called
147 *
148 * Usually used in conjunction with container_of().
149 */
150struct preempt_notifier {
151 struct hlist_node link;
152 struct preempt_ops *ops;
153};
154
155void preempt_notifier_register(struct preempt_notifier *notifier);
156void preempt_notifier_unregister(struct preempt_notifier *notifier);
157
158static inline void preempt_notifier_init(struct preempt_notifier *notifier,
159 struct preempt_ops *ops)
160{
161 INIT_HLIST_NODE(&notifier->link);
162 notifier->ops = ops;
163}
164
165#endif
166
1da177e4 167#endif /* __LINUX_PREEMPT_H */