/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/ftrace_event.h>

#include "rcu.h"

/* Forward declarations for tiny_plugin.h. */
struct rcu_ctrlblk;
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp);
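
/*
 * rcu_dynticks_nesting tracks how deeply this CPU is nested above the
 * idle loop: zero means idle (an extended quiescent state), interrupt
 * entry and exit adjust it by one, and process-level entry and exit
 * adjust it by DYNTICK_TASK_NEST_VALUE, letting both counts share this
 * single variable (see the DYNTICK_TASK_* definitions in rcupdate.h).
 */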
static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;

#include "tiny_plugin.h"

/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */
static void rcu_idle_enter_common(long long newval)
{
	if (newval) {
		RCU_TRACE(trace_rcu_dyntick(TPS("--="),
					    rcu_dynticks_nesting, newval));
		rcu_dynticks_nesting = newval;
		return;
	}
	RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
				    rcu_dynticks_nesting, newval));
	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
					    rcu_dynticks_nesting, newval));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	rcu_sched_qs(); /* implies rcu_bh_qs() */
	barrier();
	rcu_dynticks_nesting = newval;
}

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
	unsigned long flags;
	long long newval;

	local_irq_save(flags);
	WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
	if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
	    DYNTICK_TASK_NEST_VALUE)
		newval = 0;
	else
		newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
	rcu_idle_enter_common(newval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);

/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
	unsigned long flags;
	long long newval;

	local_irq_save(flags);
	newval = rcu_dynticks_nesting - 1;
	WARN_ON_ONCE(newval < 0);
	rcu_idle_enter_common(newval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_exit);

/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */
static void rcu_idle_exit_common(long long oldval)
{
	if (oldval) {
		RCU_TRACE(trace_rcu_dyntick(TPS("++="),
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
	if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
		rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
	else
		rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting++;
	WARN_ON_ONCE(rcu_dynticks_nesting == 0);
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_enter);

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

/*
 * Test whether RCU is watching the current CPU, that is, whether the
 * CPU is non-idle.
 */
bool notrace __rcu_is_watching(void)
{
	return rcu_dynticks_nesting;
}
EXPORT_SYMBOL(__rcu_is_watching);

#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count; we must be running at the first interrupt
 * level.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	return rcu_dynticks_nesting <= 1;
}
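
/*
 * For example, an idle CPU has rcu_dynticks_nesting == 0, so taking the
 * scheduling-clock interrupt raises it only to 1.  An interrupt from
 * process level instead starts above DYNTICK_TASK_NEST_VALUE, and a
 * nested interrupt from idle reaches at least 2, so any value greater
 * than 1 means that idle was not what was interrupted.
 */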

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Called with irqs disabled to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	RCU_TRACE(reset_cpu_stall_ticks(rcp));
	if (rcp->rcucblist != NULL &&
	    rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting,
 * so that rcu_qsctr_help() runs on both control blocks even when the
 * first call reports a quiescent state.
 */
void rcu_sched_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int cpu, int user)
{
	RCU_TRACE(check_cpu_stalls());
	if (user || rcu_is_cpu_rrupt_from_idle())
		rcu_sched_qs();
	else if (!in_softirq())
		rcu_bh_qs();
	if (user)
		rcu_note_voluntary_context_switch(current);
}
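
/*
 * A tick from user mode counts as a quiescent state because a CPU
 * executing in user mode cannot be inside a kernel RCU read-side
 * critical section; likewise, !in_softirq() means that no RCU-bh
 * reader (which runs with bottom halves disabled) is in progress.
 */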

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	const char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail) {
		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
					      !!ACCESS_ONCE(rcp->rcucblist),
					      need_resched(),
					      is_idle_task(current),
					      false));
		return;
	}

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name,
				      cb_count, 0, need_resched(),
				      is_idle_task(current),
				      false));
}
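
/*
 * Illustrative model of the splice above (plain C, not kernel code; the
 * names are invented for this sketch).  The callback list is singly
 * linked with two tail pointers: ->donetail bounds the sublist whose
 * grace period has elapsed, and ->curtail is the tail of the whole list.
 *
 *	struct cb { struct cb *next; };
 *
 *	struct ctrl {
 *		struct cb *list;	// head of the callback list
 *		struct cb **donetail;	// end of the ready-to-invoke sublist
 *		struct cb **curtail;	// end of the entire list
 *	};				// initially both tails == &list
 *
 *	static void enqueue(struct ctrl *c, struct cb *cb)	// cf. __call_rcu()
 *	{
 *		cb->next = NULL;
 *		*c->curtail = cb;
 *		c->curtail = &cb->next;
 *	}
 *
 *	static void quiescent(struct ctrl *c)	// cf. rcu_qsctr_help()
 *	{
 *		c->donetail = c->curtail;
 *	}
 *
 *	static struct cb *splice_done(struct ctrl *c)	// cf. the code above
 *	{
 *		struct cb *done = c->list;
 *
 *		c->list = *c->donetail;
 *		*c->donetail = NULL;
 *		if (c->curtail == c->donetail)
 *			c->curtail = &c->list;
 *		c->donetail = &c->list;
 *		return done;
 *	}
 *
 * splice_done() detaches the ready callbacks for invocation while any
 * callbacks enqueued since the last quiescent state stay queued.
 */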

static void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_sched() in RCU read-side critical section");
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
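
/*
 * Illustrative update-side usage (a hypothetical caller, not part of
 * this file): unlink an item, wait for a grace period, then free it,
 * knowing that all pre-existing readers must have finished.
 *
 *	struct foo {
 *		int data;
 *	};
 *	static struct foo __rcu *global_foo;
 *
 *	static void update_foo(struct foo *newp)
 *	{
 *		struct foo *oldp;
 *
 *		// Assumes the caller holds the update-side lock.
 *		oldp = rcu_dereference_protected(global_foo, 1);
 *		rcu_assign_pointer(global_foo, newp);
 *		synchronize_sched();	// wait for pre-existing readers
 *		kfree(oldp);		// now safe to reclaim
 *	}
 */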

/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
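
/*
 * Illustrative usage (a hypothetical caller): embed the rcu_head in the
 * protected structure, and have the callback recover the enclosing
 * object with container_of() before freeing it.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void free_foo_cb(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	// After removing fp from all reader-visible structures:
 *	call_rcu_sched(&fp->rcu, free_foo_cb);
 */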

void rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}