/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/ftrace_event.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */

#include "rcu.h"

/* Forward declarations for tiny_plugin.h. */
struct rcu_ctrlblk;
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp);

static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;

#include "tiny_plugin.h"

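/*
 * Rough sketch of the rcu_dynticks_nesting bookkeeping (values below are
 * schematic, for illustration only): zero means this CPU is idle, and
 * thus in an extended quiescent state; non-zero means RCU is watching.
 * Process-level idle entry/exit moves the counter by
 * DYNTICK_TASK_NEST_VALUE, while each irq entry/exit moves it by one:
 *
 *	rcu_idle_enter();	nesting -> 0 (RCU not watching)
 *	rcu_irq_enter();	nesting -> 1 (irq taken from idle)
 *	rcu_irq_exit();		nesting -> 0 (back to idle)
 *	rcu_idle_exit();	nesting -> DYNTICK_TASK_EXIT_IDLE
 */
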
/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
static void rcu_idle_enter_common(long long newval)
{
	if (newval) {
		RCU_TRACE(trace_rcu_dyntick(TPS("--="),
					    rcu_dynticks_nesting, newval));
		rcu_dynticks_nesting = newval;
		return;
	}
	RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
				    rcu_dynticks_nesting, newval));
	if (!is_idle_task(current)) {
		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
					    rcu_dynticks_nesting, newval));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	rcu_sched_qs(0); /* implies rcu_bh_qs(0) */
	barrier();
	rcu_dynticks_nesting = newval;
}

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
	unsigned long flags;
	long long newval;

	local_irq_save(flags);
	WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
	if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
	    DYNTICK_TASK_NEST_VALUE)
		newval = 0;
	else
		newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
	rcu_idle_enter_common(newval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);
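
/*
 * Illustrative pairing, a sketch rather than any particular
 * architecture's idle loop: the low-power wait is expected to be
 * bracketed so that RCU treats the sleep as an extended quiescent state:
 *
 *	rcu_idle_enter();
 *	... wait for an interrupt ...
 *	rcu_idle_exit();
 */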

/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
	unsigned long flags;
	long long newval;

	local_irq_save(flags);
	newval = rcu_dynticks_nesting - 1;
	WARN_ON_ONCE(newval < 0);
	rcu_idle_enter_common(newval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_exit);

/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
static void rcu_idle_exit_common(long long oldval)
{
	if (oldval) {
		RCU_TRACE(trace_rcu_dyntick(TPS("++="),
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
	if (!is_idle_task(current)) {
		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
	if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
		rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
	else
		rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting++;
	WARN_ON_ONCE(rcu_dynticks_nesting == 0);
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_enter);
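
/*
 * Sketch of the intended calling discipline (schematic, simplified from
 * the generic irq entry/exit paths): each hardirq is bracketed so the
 * nesting count stays balanced:
 *
 *	rcu_irq_enter();	nesting: n -> n + 1
 *	... run the hardirq handler ...
 *	rcu_irq_exit();		nesting: n + 1 -> n
 */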

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
bool notrace __rcu_is_watching(void)
{
	return rcu_dynticks_nesting;
}
EXPORT_SYMBOL(__rcu_is_watching);

#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count, we must be running at the first interrupt
 * level.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	return rcu_dynticks_nesting <= 1;
}
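
/*
 * Worked example (values schematic): while idle, rcu_dynticks_nesting is
 * zero, and the first hardirq taken from idle raises it to one, so a
 * value <= 1 here means the interrupt arrived from idle.  An interrupt
 * from process context instead sees at least DYNTICK_TASK_EXIT_IDLE + 1,
 * and a nested interrupt from idle sees two or more.
 */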

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().  The caller must
 * have disabled interrupts, to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	RCU_TRACE(reset_cpu_stall_ticks(rcp));
	if (rcp->rcucblist != NULL &&
	    rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}

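/*
 * Schematic of the callback list managed through rcu_ctrlblk (the
 * structure itself lives in tiny_plugin.h; the layout below is
 * illustrative).  ->rcucblist points to the oldest callback, ->donetail
 * to the ->next pointer of the last callback whose grace period has
 * elapsed, and ->curtail to the ->next pointer of the newest callback:
 *
 *	rcucblist -> cb1 -> cb2 -> cb3 -> NULL
 *	                ^                ^
 *	            donetail          curtail
 *
 * Setting donetail equal to curtail, as rcu_qsctr_help() does, marks
 * every currently queued callback as ready to invoke.
 */
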
/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int cpu, int user)
{
	RCU_TRACE(check_cpu_stalls());
	if (user || rcu_is_cpu_rrupt_from_idle())
		rcu_sched_qs(cpu);
	else if (!in_softirq())
		rcu_bh_qs(cpu);
}

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	const char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail) {
		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
					      !!ACCESS_ONCE(rcp->rcucblist),
					      need_resched(),
					      is_idle_task(current),
					      false));
		return;
	}

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name,
				      cb_count, 0, need_resched(),
				      is_idle_task(current),
				      false));
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_sched() in RCU read-side critical section");
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);

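/*
 * Illustrative update-side pattern (a sketch; gbl_foo and its struct are
 * hypothetical, not defined in this file): unpublish the old version,
 * wait for all pre-existing readers, then free:
 *
 *	struct foo *old = gbl_foo;
 *	rcu_assign_pointer(gbl_foo, new);
 *	synchronize_sched();		all prior readers have finished
 *	kfree(old);
 */
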
/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
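
/*
 * Illustrative asynchronous usage (a sketch; struct foo is hypothetical):
 * embed an rcu_head in the protected structure and free it from the
 * callback once a grace period has elapsed:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rcu));
 *	}
 *
 *	call_rcu_sched(&old->rcu, foo_reclaim);
 */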

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

void rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}