/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>

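/*
 * Context note (not in the original header): Tiny RCU is the
 * implementation used on !SMP builds.  With a single CPU, any point at
 * which that CPU is not inside a read-side critical section is a
 * quiescent state, so most of the update-side operations below reduce
 * to no-ops or to synchronize_sched().
 */
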
/* Tiny RCU does no dyntick-idle tracking, so there is nothing to snapshot. */
struct rcu_dynticks;
static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
{
	return 0;
}

static inline bool rcu_eqs_special_set(int cpu)
{
	return false;  /* Never flag non-existent other CPUs! */
}

/*
 * Grace-period state is trivial on a single CPU, so the cookie is
 * always zero and a conditional wait need only might_sleep().
 */
static inline unsigned long get_state_synchronize_rcu(void)
{
	return 0;
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

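/*
 * Usage sketch (hypothetical caller): the grace-period cookie lets a
 * caller skip a full synchronize_rcu() when a grace period has already
 * elapsed since the cookie was collected:
 *
 *	unsigned long s = get_state_synchronize_rcu();
 *	do_other_work();		(hypothetical helper)
 *	cond_synchronize_rcu(s);
 *
 * Tiny RCU always hands back a zero cookie and cond_synchronize_rcu()
 * does no more than might_sleep(), but callers should treat the cookie
 * as opaque so that the same code also works on Tree RCU.  The _sched
 * variants below follow the same pattern.
 */
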
static inline unsigned long get_state_synchronize_sched(void)
{
	return 0;
}

static inline void cond_synchronize_sched(unsigned long oldstate)
{
	might_sleep();
}

extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);

static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
}

static inline void rcu_barrier(void)
{
	rcu_barrier_sched();	/* Only one CPU, so only one list of callbacks! */
}

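/*
 * Note the distinction between the two wait primitives above:
 * synchronize_rcu_expedited() waits for pre-existing readers, whereas
 * rcu_barrier() waits for all previously queued callbacks to be
 * invoked.  Code that posts callbacks with call_rcu() and then goes
 * away (for example, a module exit path) needs rcu_barrier(), not
 * synchronize_rcu().
 */
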
/*
 * With only one CPU, the bh and sched grace periods coincide, so the
 * bh variants can simply map onto synchronize_sched().
 */
static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
	synchronize_sched();
}

static inline void kfree_call_rcu(struct rcu_head *head,
				  rcu_callback_t func)
{
	call_rcu(head, func);
}

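/*
 * Callers normally reach kfree_call_rcu() through the kfree_rcu()
 * macro, which encodes the offset of the rcu_head within the enclosing
 * structure as the "callback" so that the core code can kfree() the
 * whole object after a grace period.  Sketch, with a hypothetical
 * struct:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *	...
 *	kfree_rcu(p, rh);	(p is a struct foo *)
 */
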
/* A context switch is a quiescent state for RCU-sched. */
static inline void rcu_note_context_switch(void)
{
	rcu_sched_qs();
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
}

/*
 * Return the number of grace periods started.
 */
static inline unsigned long rcu_batches_started(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods started.
 */
static inline unsigned long rcu_batches_started_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods started.
 */
static inline unsigned long rcu_batches_started_sched(void)
{
	return 0;
}

/*
 * Return the number of grace periods completed.
 */
static inline unsigned long rcu_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods completed.
 */
static inline unsigned long rcu_batches_completed_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods completed.
 */
static inline unsigned long rcu_batches_completed_sched(void)
{
	return 0;
}

/*
 * Return the number of expedited grace periods completed.
 */
static inline unsigned long rcu_exp_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of expedited sched grace periods completed.
 */
static inline unsigned long rcu_exp_batches_completed_sched(void)
{
	return 0;
}

static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

static inline void show_rcu_gp_kthreads(void)
{
}

static inline void rcu_cpu_stall_reset(void)
{
}

/*
 * Tiny RCU need not track idle or interrupt transitions: with but one
 * CPU, there is no other CPU whose grace-period computations could
 * care.
 */
static inline void rcu_idle_enter(void)
{
}

static inline void rcu_idle_exit(void)
{
}

static inline void rcu_irq_enter(void)
{
}

static inline void rcu_irq_exit_irqson(void)
{
}

static inline void rcu_irq_enter_irqson(void)
{
}

static inline void rcu_irq_exit(void)
{
}

/*
 * Tiny RCU is not preemptible, so a task cannot exit while inside an
 * RCU read-side critical section; there is nothing to clean up here.
 */
static inline void exit_rcu(void)
{
}

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
extern int rcu_scheduler_active __read_mostly;
void rcu_scheduler_starting(void);
#else /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
static inline void rcu_scheduler_starting(void)
{
}
#endif /* #else #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

static inline bool rcu_is_watching(void)
{
	return __rcu_is_watching();
}

#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

static inline bool rcu_is_watching(void)
{
	return true;
}

#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

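/*
 * Usage sketch: rcu_is_watching() typically appears in lockdep-style
 * assertions that RCU is not used from the idle loop, for example:
 *
 *	RCU_LOCKDEP_WARN(!rcu_is_watching(),
 *			 "rcu_read_lock() used illegally while idle");
 *
 * On Tiny RCU without debugging support this is constant-true: if the
 * single CPU is running this code, it is not idle.
 */
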
static inline void rcu_all_qs(void)
{
	barrier(); /* Avoid RCU read-side critical sections leaking across. */
}

/* RCUtree hotplug events */
#define rcutree_prepare_cpu	NULL
#define rcutree_online_cpu	NULL
#define rcutree_offline_cpu	NULL
#define rcutree_dead_cpu	NULL
#define rcutree_dying_cpu	NULL

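/*
 * The CPU-hotplug core treats NULL state callbacks as "nothing to do",
 * so the NULL definitions above opt Tiny RCU out of all RCU-related
 * hotplug steps.
 */
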
#endif /* __LINUX_RCUTINY_H */