/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>

static inline unsigned long get_state_synchronize_rcu(void)
{
	return 0;
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}
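
/*
 * Example (illustration only, not part of this header): a minimal sketch of
 * how a caller might use the grace-period polling pair above.  The function
 * name example_poll_grace_period() is hypothetical; the pattern is snapshot,
 * do other work, then wait only if a grace period has not already elapsed.
 */
#if 0	/* usage sketch, not compiled */
static void example_poll_grace_period(void)
{
	unsigned long gp_snap;

	gp_snap = get_state_synchronize_rcu();	/* snapshot GP state */
	/* ... do other work while readers may still be running ... */
	cond_synchronize_rcu(gp_snap);		/* block only if still needed */
}
#endif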

static inline void rcu_barrier_bh(void)
{
	wait_rcu_gp(call_rcu_bh);
}

static inline void rcu_barrier_sched(void)
{
	wait_rcu_gp(call_rcu_sched);
}

static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
}

static inline void rcu_barrier(void)
{
	rcu_barrier_sched();  /* Only one CPU, so only one list of callbacks! */
}

static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
	synchronize_sched();
}

static inline void kfree_call_rcu(struct rcu_head *head,
				  void (*func)(struct rcu_head *rcu))
{
	call_rcu(head, func);
}
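
/*
 * Example (illustration only, not part of this header): a minimal sketch of
 * deferring a kfree() until readers are done, using the kfree_call_rcu()
 * wrapper above.  struct foo, foo_free_rcu(), and foo_release() are
 * hypothetical names used only for this sketch.
 */
#if 0	/* usage sketch, not compiled */
struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct foo, rcu));
}

static void foo_release(struct foo *fp)
{
	/* Readers may still hold references; free only after a grace period. */
	kfree_call_rcu(&fp->rcu, foo_free_rcu);
}
#endif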

static inline void rcu_note_context_switch(void)
{
	rcu_sched_qs();
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
}

/*
 * Return the number of grace periods.
 */
static inline long rcu_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods.
 */
static inline long rcu_batches_completed_bh(void)
{
	return 0;
}

static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

static inline void show_rcu_gp_kthreads(void)
{
}

static inline void rcu_cpu_stall_reset(void)
{
}

static inline void exit_rcu(void)
{
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int rcu_scheduler_active __read_mostly;
void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline void rcu_scheduler_starting(void)
{
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

static inline bool rcu_is_watching(void)
{
	return __rcu_is_watching();
}

#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

static inline bool rcu_is_watching(void)
{
	return true;
}

#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

#endif /* __LINUX_RCUTINY_H */