/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include "rcu.h"

/*
 * Initialize an rcu_batch structure to empty.
 */
static inline void rcu_batch_init(struct rcu_batch *b)
{
	b->head = NULL;
	b->tail = &b->head;
}

/*
 * Enqueue a callback onto the tail of the specified rcu_batch structure.
 */
static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head)
{
	*b->tail = head;
	b->tail = &head->next;
}

/*
 * Is the specified rcu_batch structure empty?
 */
static inline bool rcu_batch_empty(struct rcu_batch *b)
{
	return b->tail == &b->head;
}

/*
 * Remove the callback at the head of the specified rcu_batch structure
 * and return a pointer to it, or return NULL if the structure is empty.
 */
static inline struct rcu_head *rcu_batch_dequeue(struct rcu_batch *b)
{
	struct rcu_head *head;

	if (rcu_batch_empty(b))
		return NULL;

	head = b->head;
	b->head = head->next;
	if (b->tail == &head->next)
		rcu_batch_init(b);

	return head;
}

/*
 * Move all callbacks from the rcu_batch structure specified by "from" to
 * the structure specified by "to".
 */
static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from)
{
	if (!rcu_batch_empty(from)) {
		*to->tail = from->head;
		to->tail = from->tail;
		rcu_batch_init(from);
	}
}

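/*
 * Illustrative sketch (not part of the kernel build): how the
 * tail-pointer queue above behaves.  Because an empty batch's ->tail
 * points at its own ->head, enqueue never needs to special-case the
 * empty list.  The rcu_head variables here are hypothetical and exist
 * purely for illustration:
 *
 *	struct rcu_batch b;
 *	struct rcu_head a1, a2;
 *
 *	rcu_batch_init(&b);		// b.head == NULL, b.tail == &b.head
 *	rcu_batch_queue(&b, &a1);	// b.head == &a1,  b.tail == &a1.next
 *	rcu_batch_queue(&b, &a2);	// a1.next == &a2, b.tail == &a2.next
 *	rcu_batch_dequeue(&b);		// returns &a1
 *	rcu_batch_dequeue(&b);		// returns &a2 and resets b to empty
 */
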
static int init_srcu_struct_fields(struct srcu_struct *sp)
{
	sp->completed = 0;
	spin_lock_init(&sp->queue_lock);
	sp->running = false;
	rcu_batch_init(&sp->batch_queue);
	rcu_batch_init(&sp->batch_check0);
	rcu_batch_init(&sp->batch_check1);
	rcu_batch_init(&sp->batch_done);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	sp->per_cpu_ref = alloc_percpu(struct srcu_array);
	return sp->per_cpu_ref ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

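/*
 * Usage sketch (illustrative only; the "my_dev" structure and function
 * are hypothetical): a dynamically allocated SRCU domain.
 *
 *	struct my_dev {
 *		struct srcu_struct srcu;
 *	};
 *
 *	static int my_dev_setup(struct my_dev *d)
 *	{
 *		return init_srcu_struct(&d->srcu);
 *	}
 *
 * Statically allocated domains can instead be declared with
 * DEFINE_SRCU(name) or DEFINE_STATIC_SRCU(name) from <linux/srcu.h>,
 * which make a separate init_srcu_struct() call unnecessary.
 */
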
/*
 * Returns approximate total of the readers' ->lock_count[] values for the
 * rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu);

		sum += READ_ONCE(cpuc->lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->unlock_count[] values for the
 * rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu);

		sum += READ_ONCE(cpuc->unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(sp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding unlock
	 * is counted.  Needs to be a smp_mb() as the read side may contain a
	 * read from a variable that is written to before the synchronize_srcu()
	 * in the write side.  In this case smp_mb()s A and B act like the store
	 * buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses after the
	 * synchronize_srcu() from being executed before the grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between.  This does not
	 * mean that there are no more readers, as one could have read the
	 * current index but not have incremented the lock counter yet.
	 *
	 * Possible bug: There is no guarantee that there haven't been ULONG_MAX
	 * increments of ->lock_count[] since the unlocks were counted, meaning
	 * that this could return true even if there are still active readers.
	 * Since there are no memory barriers around srcu_flip(), the CPU is not
	 * required to increment ->completed before running
	 * srcu_readers_unlock_idx(), which means that there could be an
	 * arbitrarily large number of critical sections that execute after
	 * srcu_readers_unlock_idx() but use the old value of ->completed.
	 */
	return srcu_readers_lock_idx(sp, idx) == unlocks;
}

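/*
 * Illustrative store-buffering (SB) sketch for barriers A and B above.
 * This is an assumed scenario, not code from this file, and it compresses
 * the call chain: the reader's increment and smp_mb() B live in
 * __srcu_read_lock(), the updater's store to the hypothetical variable
 * "data" precedes its call to synchronize_srcu(), and smp_mb() A runs
 * inside srcu_readers_active_idx_check():
 *
 *	Reader					Updater
 *	------					-------
 *	lock_count[idx]++;			WRITE_ONCE(data, 1);
 *	smp_mb(); // B				smp_mb(); // A
 *	r1 = READ_ONCE(data);			r2 = lock_count[idx];
 *
 * The pair of full barriers forbids both loads from missing the other
 * side's store: if the reader misses the new data (r1 == 0), the updater
 * is guaranteed to observe the lock_count increment and keep waiting.
 */
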
/**
 * srcu_readers_active - returns true if there are readers, and false
 * otherwise
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu);

		sum += READ_ONCE(cpuc->lock_count[0]);
		sum += READ_ONCE(cpuc->lock_count[1]);
		sum -= READ_ONCE(cpuc->unlock_count[0]);
		sum -= READ_ONCE(cpuc->unlock_count[1]);
	}
	return sum;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this only after you are finished using a given srcu_struct
 * that was initialized via init_srcu_struct().  This code does some
 * probabilistic checking, spotting late uses of srcu_read_lock(),
 * synchronize_srcu(), synchronize_srcu_expedited(), and call_srcu().
 * If any such late uses are detected, the per-CPU memory associated with
 * the srcu_struct is simply leaked and WARN_ON() is invoked.  If the
 * caller frees the srcu_struct itself, a use-after-free crash will likely
 * ensue, but at least there will be a warning printed.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Leakage unless caller handles error. */
	free_percpu(sp->per_cpu_ref);
	sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = READ_ONCE(sp->completed) & 0x1;
	__this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(sp->per_cpu_ref->unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

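/*
 * Reader-side usage sketch (illustrative only; "ss", "gp", and the
 * "foo" type are hypothetical).  Callers normally use the
 * srcu_read_lock() and srcu_read_unlock() wrappers from <linux/srcu.h>,
 * which add lockdep annotations around the primitives above:
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&ss);
 *	p = srcu_dereference(gp, &ss);
 *	// ... use *p; unlike plain RCU, sleeping is legal here ...
 *	srcu_read_unlock(&ss, idx);
 */
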
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after 10 microseconds,
 * we repeatedly block for 1-millisecond time periods.  This approach
 * has done well in testing, so there is no need for a config parameter.
 */
#define SRCU_RETRY_CHECK_DELAY		5
#define SYNCHRONIZE_SRCU_TRYCOUNT	2
#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT	12

/*
 * Wait until all pre-existing readers complete.  Such readers
 * will have used the index specified by "idx".  The caller should
 * ensure that ->completed is not changed while checking and that
 * idx = (->completed & 1) ^ 1.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->completed counter so that future SRCU readers will
 * use the other rank of the ->(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	WRITE_ONCE(sp->completed, sp->completed + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

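/*
 * Illustrative index arithmetic (assumed values, not code from this
 * file): with ->completed == 4, new readers increment ->lock_count[0];
 * once srcu_flip() makes ->completed == 5, new readers increment
 * ->lock_count[1], so the idx == 0 counters can only drain.  Waiting
 * for each rank in turn is what makes the grace period starvation-free.
 */
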
/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
	       rcu_callback_t func)
{
	unsigned long flags;

	head->next = NULL;
	head->func = func;
	spin_lock_irqsave(&sp->queue_lock, flags);
	smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */
	rcu_batch_queue(&sp->batch_queue, head);
	if (!sp->running) {
		sp->running = true;
		queue_delayed_work(system_power_efficient_wq, &sp->work, 0);
	}
	spin_unlock_irqrestore(&sp->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(call_srcu);

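/*
 * Usage sketch for call_srcu() (illustrative only; the "foo" type, the
 * "ss" srcu_struct, and the reclaim helper are hypothetical):
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	// After unlinking "old" from all reader-visible structures:
 *	call_srcu(&ss, &old->rh, foo_reclaim);
 */
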
static void srcu_advance_batches(struct srcu_struct *sp, int trycount);
static void srcu_reschedule(struct srcu_struct *sp);

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
{
	struct rcu_synchronize rcu;
	struct rcu_head *head = &rcu.head;
	bool done = false;

	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	might_sleep();
	init_completion(&rcu.completion);

	head->next = NULL;
	head->func = wakeme_after_rcu;
	spin_lock_irq(&sp->queue_lock);
	smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */
	if (!sp->running) {
		/* steal the processing owner */
		sp->running = true;
		rcu_batch_queue(&sp->batch_check0, head);
		spin_unlock_irq(&sp->queue_lock);

		srcu_advance_batches(sp, trycount);
		if (!rcu_batch_empty(&sp->batch_done)) {
			BUG_ON(sp->batch_done.head != head);
			rcu_batch_dequeue(&sp->batch_done);
			done = true;
		}
		/* give the processing owner to work_struct */
		srcu_reschedule(sp);
	} else {
		rcu_batch_queue(&sp->batch_queue, head);
		spin_unlock_irq(&sp->queue_lock);
	}

	if (!done) {
		wait_for_completion(&rcu.completion);
		smp_mb(); /* Caller's later accesses after GP. */
	}
}

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both index ranks to drain to zero.  To avoid
 * possible starvation of synchronize_srcu(), it first waits for the
 * count of the index=((->completed & 1) ^ 1) rank to drain to zero,
 * then flips ->completed and waits for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, (rcu_gp_is_expedited() && !rcu_gp_is_normal())
			   ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
			   : SYNCHRONIZE_SRCU_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

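/*
 * Updater-side sketch (illustrative only; "gp", "ss", "my_lock", and
 * "newp" are hypothetical): publish a new version of a structure, wait
 * for pre-existing SRCU readers, then free the old version.
 *
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, newp);
 *	spin_unlock(&my_lock);
 *	synchronize_srcu(&ss);	// All readers that could see "old" are done.
 *	kfree(old);
 */
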
/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	synchronize_srcu(sp);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->completed;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

#define SRCU_CALLBACK_BATCH	10
#define SRCU_INTERVAL		1

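/*
 * Overview of the callback pipeline implemented below (a descriptive
 * aid, not code from the original file).  Each callback flows through
 * four rcu_batch stages, advanced by the srcu_struct's workqueue item:
 *
 *	call_srcu() --> batch_queue	(srcu_collect_new)
 *	            --> batch_check0	(waits for readers on one idx)
 *	            --> batch_check1	(waits for readers on the other idx)
 *	            --> batch_done	(srcu_invoke_callbacks)
 *
 * A callback must wait on both index ranks because readers that fetched
 * ->completed just before a flip may still be using the old index.
 */
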
/*
 * Move any new SRCU callbacks to the first stage of the SRCU grace
 * period pipeline.
 */
static void srcu_collect_new(struct srcu_struct *sp)
{
	if (!rcu_batch_empty(&sp->batch_queue)) {
		spin_lock_irq(&sp->queue_lock);
		rcu_batch_move(&sp->batch_check0, &sp->batch_queue);
		spin_unlock_irq(&sp->queue_lock);
	}
}

/*
 * Core SRCU state machine.  Advance callbacks from ->batch_check0 to
 * ->batch_check1 and then to ->batch_done as readers drain.
 */
static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
{
	int idx = 1 ^ (sp->completed & 1);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->completed for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 */

	if (rcu_batch_empty(&sp->batch_check0) &&
	    rcu_batch_empty(&sp->batch_check1))
		return; /* no callbacks need to be advanced */

	if (!try_check_zero(sp, idx, trycount))
		return; /* failed to advance, will try after SRCU_INTERVAL */

	/*
	 * The callbacks in ->batch_check1 have already been through their
	 * first zero check and the subsequent counter flip, back when they
	 * were enqueued on ->batch_check0 in a previous invocation of
	 * srcu_advance_batches().  (Presumably try_check_zero() returned
	 * false during that invocation, leaving the callbacks stranded on
	 * ->batch_check1.)  They are therefore ready to invoke, so move
	 * them to ->batch_done.
	 */
	rcu_batch_move(&sp->batch_done, &sp->batch_check1);

	if (rcu_batch_empty(&sp->batch_check0))
		return; /* no callbacks need to be advanced */
	srcu_flip(sp);

	/*
	 * The callbacks in ->batch_check0 just finished their
	 * first check zero and flip, so move them to ->batch_check1
	 * for future checking on the other idx.
	 */
	rcu_batch_move(&sp->batch_check1, &sp->batch_check0);

	/*
	 * SRCU read-side critical sections are normally short, so check
	 * at least twice in quick succession after a flip.
	 */
	trycount = trycount < 2 ? 2 : trycount;
	if (!try_check_zero(sp, idx^1, trycount))
		return; /* failed to advance, will try after SRCU_INTERVAL */

	/*
	 * The callbacks in ->batch_check1 have now waited for all
	 * pre-existing readers using both idx values.  They are therefore
	 * ready to invoke, so move them to ->batch_done.
	 */
	rcu_batch_move(&sp->batch_done, &sp->batch_check1);
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct srcu_struct *sp)
{
	int i;
	struct rcu_head *head;

	for (i = 0; i < SRCU_CALLBACK_BATCH; i++) {
		head = rcu_batch_dequeue(&sp->batch_done);
		if (!head)
			break;
		local_bh_disable();
		head->func(head);
		local_bh_enable();
	}
}

/*
 * Finished one round of SRCU grace period.  Start another round if there
 * are more SRCU callbacks queued; otherwise put SRCU into the not-running
 * state.
 */
static void srcu_reschedule(struct srcu_struct *sp)
{
	bool pending = true;

	if (rcu_batch_empty(&sp->batch_done) &&
	    rcu_batch_empty(&sp->batch_check1) &&
	    rcu_batch_empty(&sp->batch_check0) &&
	    rcu_batch_empty(&sp->batch_queue)) {
		spin_lock_irq(&sp->queue_lock);
		if (rcu_batch_empty(&sp->batch_done) &&
		    rcu_batch_empty(&sp->batch_check1) &&
		    rcu_batch_empty(&sp->batch_check0) &&
		    rcu_batch_empty(&sp->batch_queue)) {
			sp->running = false;
			pending = false;
		}
		spin_unlock_irq(&sp->queue_lock);
	}

	if (pending)
		queue_delayed_work(system_power_efficient_wq,
				   &sp->work, SRCU_INTERVAL);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_collect_new(sp);
	srcu_advance_batches(sp, 1);
	srcu_invoke_callbacks(sp);
	srcu_reschedule(sp);
}
EXPORT_SYMBOL_GPL(process_srcu);