/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>
struct rcu_dynticks;
static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
{
	return 0;
}
static inline bool rcu_eqs_special_set(int cpu)
{
	return false;  /* Never flag non-existent other CPUs! */
}
static inline unsigned long get_state_synchronize_rcu(void)
{
	return 0;
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}
static inline unsigned long get_state_synchronize_sched(void)
{
	return 0;
}

static inline void cond_synchronize_sched(unsigned long oldstate)
{
	might_sleep();
}
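/*
 * Illustrative sketch, not part of the original header: the polling
 * pattern that the four stubs above support.  A caller snapshots the
 * grace-period state, does other work, and later blocks only if a full
 * grace period has not already elapsed.  do_other_work() is a
 * hypothetical placeholder.
 *
 *	unsigned long oldstate = get_state_synchronize_rcu();
 *
 *	do_other_work();		// possibly spans a grace period
 *
 *	cond_synchronize_rcu(oldstate);	// no-op if a GP already ended
 *
 * On Tiny RCU the snapshot is always zero: with only one CPU, any
 * context that is allowed to block is itself a quiescent state, so
 * cond_synchronize_rcu() reduces to might_sleep().
 */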
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);
static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
}
static inline void rcu_barrier(void)
{
	rcu_barrier_sched();  /* Only one CPU, so only one list of callbacks! */
}
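/*
 * Illustrative sketch, not part of the original header: a typical
 * rcu_barrier() caller waits for all in-flight call_rcu() callbacks to
 * be invoked before tearing down the facility that queued them.
 * my_cache and my_module_exit() are hypothetical.
 *
 *	static void my_module_exit(void)
 *	{
 *		rcu_barrier();			// wait for pending callbacks
 *		kmem_cache_destroy(my_cache);	// now safe to tear down
 *	}
 */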
static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
	synchronize_sched();
}
static inline void kfree_call_rcu(struct rcu_head *head,
				  rcu_callback_t func)
{
	call_rcu(head, func);
}
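/*
 * Illustrative sketch, not part of the original header:
 * kfree_call_rcu() backs the kfree_rcu() macro, which frees an
 * RCU-protected structure once a grace period has elapsed.  struct foo
 * and release_foo() are hypothetical.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void release_foo(struct foo *fp)
 *	{
 *		kfree_rcu(fp, rcu);	// kfree(fp) after a grace period
 *	}
 */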
#define rcu_note_context_switch(preempt) \
	do { \
		rcu_sched_qs(); \
		rcu_note_voluntary_context_switch_lite(current); \
	} while (0)
/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
}
/*
 * Return the number of grace periods started.
 */
static inline unsigned long rcu_batches_started(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods started.
 */
static inline unsigned long rcu_batches_started_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods started.
 */
static inline unsigned long rcu_batches_started_sched(void)
{
	return 0;
}

/*
 * Return the number of grace periods completed.
 */
static inline unsigned long rcu_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods completed.
 */
static inline unsigned long rcu_batches_completed_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods completed.
 */
static inline unsigned long rcu_batches_completed_sched(void)
{
	return 0;
}

/*
 * Return the number of expedited grace periods completed.
 */
static inline unsigned long rcu_exp_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of expedited sched grace periods completed.
 */
static inline unsigned long rcu_exp_batches_completed_sched(void)
{
	return 0;
}
static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

static inline void show_rcu_gp_kthreads(void)
{
}

static inline void rcu_cpu_stall_reset(void)
{
}

static inline void rcu_idle_enter(void)
{
}

static inline void rcu_idle_exit(void)
{
}

static inline void rcu_irq_enter(void)
{
}

static inline void rcu_irq_exit_irqson(void)
{
}

static inline void rcu_irq_enter_irqson(void)
{
}

static inline void rcu_irq_exit(void)
{
}

static inline void exit_rcu(void)
{
}
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
extern int rcu_scheduler_active __read_mostly;
void rcu_scheduler_starting(void);
#else /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
static inline void rcu_scheduler_starting(void)
{
}
#endif /* #else #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

static inline bool rcu_is_watching(void)
{
	return __rcu_is_watching();
}

#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

static inline bool rcu_is_watching(void)
{
	return true;
}

#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
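/*
 * Illustrative sketch, not part of the original header:
 * rcu_is_watching() typically appears in debug assertions that catch
 * RCU usage from idle or offline contexts, for example:
 *
 *	RCU_LOCKDEP_WARN(!rcu_is_watching(),
 *			 "rcu_dereference() used while RCU is not watching");
 */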
static inline void rcu_request_urgent_qs_task(struct task_struct *t)
{
}
static inline void rcu_all_qs(void)
{
	barrier(); /* Avoid RCU read-side critical sections leaking across. */
}
/* RCUtree hotplug events */
#define rcutree_prepare_cpu      NULL
#define rcutree_online_cpu       NULL
#define rcutree_offline_cpu      NULL
#define rcutree_dead_cpu         NULL
#define rcutree_dying_cpu        NULL
#endif /* __LINUX_TINY_H */