/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/ftrace_event.h>

#include "rcu.h"

/* Forward declarations for tiny_plugin.h. */
struct rcu_ctrlblk;
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp);

#include "tiny_plugin.h"
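
/*
 * As a rough sketch (see tiny_plugin.h for the real definition, which
 * also carries RCU_TRACE() statistics fields), the rcu_ctrlblk
 * structure is little more than a singly linked list of rcu_head
 * callbacks plus two tail pointers:
 *
 *	struct rcu_ctrlblk {
 *		struct rcu_head *rcucblist;	// List of pending callbacks.
 *		struct rcu_head **donetail;	// ->next of last "done" CB.
 *		struct rcu_head **curtail;	// ->next of last queued CB.
 *	};
 *
 * Callbacks between ->rcucblist and *->donetail have already waited out
 * a grace period and may be invoked; callbacks after *->donetail are
 * still waiting.
 */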

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode.
 */
void rcu_idle_enter(void)
{
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);

/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
}
EXPORT_SYMBOL_GPL(rcu_irq_exit);

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
}
EXPORT_SYMBOL_GPL(rcu_irq_enter);
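
/*
 * The four hooks above are intentionally empty.  Tiny RCU is built only
 * for !SMP && !PREEMPT kernels (a rough summary of its Kconfig
 * condition), so there is no other CPU that needs to be told about
 * dyntick-idle transitions, and idle itself is trivially a quiescent
 * state.  Tree RCU, by contrast, does real bookkeeping in its versions
 * of these functions.
 */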

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

/*
 * Test whether RCU thinks that the current CPU is idle.  Tiny RCU does
 * no dyntick-idle bookkeeping, so it always reports that RCU is
 * watching.
 */
bool notrace __rcu_is_watching(void)
{
	return true;
}
EXPORT_SYMBOL(__rcu_is_watching);

#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().  Note that irqs
 * are disabled by the callers, which avoids confusion should an
 * interrupt handler invoke call_rcu() partway through the update.
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	RCU_TRACE(reset_cpu_stall_ticks(rcp));
	if (rcp->rcucblist != NULL &&
	    rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}
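
/*
 * Worked example (illustrative, with hypothetical callbacks A and B):
 * if the list holds
 *
 *	->rcucblist -> A -> B -> NULL
 *	->donetail == &->rcucblist	(nothing yet "done")
 *	->curtail  == &B->next
 *
 * then rcu_qsctr_help() above sets ->donetail = ->curtail, which marks
 * both A and B as having waited out a grace period; the caller then
 * raises RCU_SOFTIRQ so that __rcu_process_callbacks() will invoke
 * them.
 */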

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting,
 * so that both helpers run and both callback lists are advanced.
 */
void rcu_sched_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int user)
{
	RCU_TRACE(check_cpu_stalls());
	if (user)
		rcu_sched_qs();
	else if (!in_softirq())
		rcu_bh_qs();
	if (user)
		rcu_note_voluntary_context_switch(current);
}

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	const char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail) {
		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
					      !!ACCESS_ONCE(rcp->rcucblist),
					      need_resched(),
					      is_idle_task(current),
					      false));
		return;
	}

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name,
				      cb_count, 0, need_resched(),
				      is_idle_task(current),
				      false));
}
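
/*
 * Continuing the worked example (hypothetical callbacks A and B done,
 * C still waiting): before the splice above,
 *
 *	->rcucblist -> A -> B -> C -> NULL
 *	->donetail == &B->next, ->curtail == &C->next
 *
 * and afterwards the local list is A -> B -> NULL while ->rcucblist
 * points to C, with ->donetail reset to &->rcucblist.  The callbacks
 * are then invoked with irqs enabled, so a call_rcu() from an
 * interrupt handler cannot corrupt the list being processed.
 */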

static void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * We would like to make this a static inline later, but the
 * cond_resched() currently makes that problematic.
 */
void synchronize_sched(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_sched() in RCU read-side critical section");
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
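
/*
 * Illustrative updater pattern (hypothetical names, not part of this
 * file): unpublish an element, wait for pre-existing readers, then
 * free it.  On this UP implementation the wait degenerates to the
 * cond_resched() above, but the usage is identical to Tree RCU:
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new);
 *	synchronize_sched();	// All pre-existing readers are now done.
 *	kfree(old);
 */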

/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_sched_qs() */
		resched_cpu(0);
	}
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
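
/*
 * Illustrative caller (hypothetical struct foo, not part of this file):
 * embed an rcu_head in the protected structure and recover the
 * enclosing object with container_of() in the callback:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rcu));
 *	}
 *
 *	...
 *	call_rcu_sched(&old_foo->rcu, foo_reclaim);
 */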

void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	RCU_TRACE(reset_cpu_stall_ticks(&rcu_sched_ctrlblk));
	RCU_TRACE(reset_cpu_stall_ticks(&rcu_bh_ctrlblk));

	rcu_early_boot_tests();
}