/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);
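
/*
 * Illustrative reading of the parameter above: with the default of
 * ULONG_MAX >> 2, the low-order 62 bits of ->srcu_gp_seq must all be
 * zero before the wrap check in srcu_gp_end() fires, so it runs roughly
 * once per 2^62 counts, in other words effectively never.  Smaller
 * values force more frequent checks, which is mainly useful for testing.
 */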

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);

/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	sp->level[0] = &sp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	rcu_for_each_node_breadth_first(sp, snp) {
		spin_lock_init(&snp->lock);
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &sp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == sp->level[level + 1])
			level++;
		snp->srcu_parent = sp->level[level - 1] +
				   (snp - sp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = sp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		spin_lock_init(&sdp->lock);
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
		sdp->sp = sp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}
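
/*
 * Illustrative geometry (a sketch; the actual shape depends on
 * CONFIG_RCU_FANOUT, CONFIG_RCU_FANOUT_LEAF, and boot parameters):
 * with a leaf fanout of 16 on a 64-CPU system, rcu_num_lvls == 2,
 * giving one root srcu_node with four leaf children, and the ->mynode
 * pointers of CPUs 0-15 all reference the first leaf.
 */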

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
{
	mutex_init(&sp->srcu_cb_mutex);
	mutex_init(&sp->srcu_gp_mutex);
	sp->srcu_idx = 0;
	sp->srcu_gp_seq = 0;
	sp->srcu_barrier_seq = 0;
	mutex_init(&sp->srcu_barrier_mutex);
	atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	if (!is_static)
		sp->sda = alloc_percpu(struct srcu_data);
	init_srcu_struct_nodes(sp, is_static);
	sp->srcu_gp_seq_needed_exp = 0;
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
	return sp->sda ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	spin_lock_init(&sp->gp_lock);
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	spin_lock_init(&sp->gp_lock);
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
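
/*
 * Illustrative use (a sketch, not part of this file; "my_srcu" is a
 * hypothetical name):
 *
 *	static struct srcu_struct my_srcu;
 *
 *	ret = init_srcu_struct(&my_srcu);
 *	if (ret)
 *		return ret;
 *	...
 *	cleanup_srcu_struct(&my_srcu);
 */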

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ->gp_lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *sp)
{
	unsigned long flags;

	WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave(&sp->gp_lock, flags);
	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore(&sp->gp_lock, flags);
		return;
	}
	init_srcu_struct_fields(sp, true);
	spin_unlock_irqrestore(&sp->gp_lock, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(sp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between.  This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(sp, idx) == unlocks;
}
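
/*
 * For an illustrative sense of scale: on a 64-bit system with
 * NR_CPUS = 4096, floor(ULONG_MAX/NR_CPUS/2) is about 2^51, so the
 * nesting limit above is nowhere near a practical constraint.
 */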

/**
 * srcu_readers_active - returns true if there are readers, and false
 * otherwise
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL 1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *sp)
{
	if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
			 READ_ONCE(sp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(sp)))
		return; /* Leakage unless caller handles error. */
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Leakage unless caller handles error. */
	flush_delayed_work(&sp->work);
	for_each_possible_cpu(cpu)
		flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(sp))) {
		pr_info("cleanup_srcu_struct: Active srcu_struct %p state: %d\n", sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(sp->sda);
	sp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = READ_ONCE(sp->srcu_idx) & 0x1;
	this_cpu_inc(sp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
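
/*
 * Illustrative reader (a sketch; the srcu_read_lock() and
 * srcu_read_unlock() wrappers live in include/linux/srcu.h, and
 * "my_srcu", "gp", and "do_something_with()" are hypothetical):
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	do_something_with(p);
 *	srcu_read_unlock(&my_srcu, idx);
 */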

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY 5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *sp)
{
	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
	int state;

	RCU_LOCKDEP_WARN(!lockdep_is_held(&sp->gp_lock),
			 "Invoked srcu_gp_start() without ->gp_lock!");
	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&sp->srcu_gp_seq);
	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}
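
/*
 * Aside on encoding (an illustrative reading of the rcu_seq_*() helpers
 * in rcu.h): ->srcu_gp_seq packs the grace-period phase into its two
 * low-order bits and the grace-period counter into the remaining bits.
 * For example, the value 0x9 decodes as counter 2 (0x9 >> 2) in phase
 * SRCU_STATE_SCAN1 ((0x9 & 0x3) == 1).
 */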

/*
 * Track online CPUs to guide callback workqueue placement.
 */
DEFINE_PER_CPU(bool, srcu_online);

void srcu_online_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
}

void srcu_offline_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
}

/*
 * Place the workqueue handler on the specified CPU if online, otherwise
 * just run it wherever.  This is useful for placing workqueue handlers
 * that are to invoke the specified CPU's callbacks.
 */
static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				       struct delayed_work *dwork,
				       unsigned long delay)
{
	bool ret;

	preempt_disable();
	if (READ_ONCE(per_cpu(srcu_online, cpu)))
		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
	else
		ret = queue_delayed_work(wq, dwork, delay);
	preempt_enable();
	return ret;
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp->cpu, system_power_efficient_wq,
				   &sdp->work, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *sp)
{
	unsigned long cbdelay;
	bool cbs;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	int idxnext;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&sp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq(&sp->gp_lock);
	idx = rcu_seq_state(sp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(sp);
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	rcu_seq_end(&sp->srcu_gp_seq);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
		sp->srcu_gp_seq_needed_exp = gpseq;
	spin_unlock_irq(&sp->gp_lock);
	mutex_unlock(&sp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
	rcu_for_each_node_breadth_first(sp, snp) {
		spin_lock_irq(&snp->lock);
		cbs = false;
		if (snp >= sp->level[rcu_num_lvls - 1])
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			snp->srcu_gp_seq_needed_exp = gpseq;
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq(&snp->lock);
		if (cbs) {
			smp_mb(); /* GP end before CB invocation. */
			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
		}

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check))
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(sp->sda, cpu);
				spin_lock_irqsave(&sdp->lock, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				spin_unlock_irqrestore(&sdp->lock, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&sp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq(&sp->gp_lock);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
		srcu_gp_start(sp);
		spin_unlock_irq(&sp->gp_lock);
		/* Throttle expedited grace periods: Should be rare! */
		srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
				    ? 0 : SRCU_INTERVAL);
	} else {
		spin_unlock_irq(&sp->gp_lock);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave(&snp->lock, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore(&snp->lock, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore(&snp->lock, flags);
	}
	spin_lock_irqsave(&sp->gp_lock, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;
	spin_unlock_irqrestore(&sp->gp_lock, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 */
static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave(&snp->lock, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore(&snp->lock, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				smp_mb(); /* CBs after GP! */
				srcu_schedule_cbs_sdp(sdp, do_norm
						      ? SRCU_INTERVAL
						      : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(sp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			snp->srcu_gp_seq_needed_exp = s;
		spin_unlock_irqrestore(&snp->lock, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave(&sp->gp_lock, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
		srcu_gp_start(sp);
		queue_delayed_work(system_power_efficient_wq, &sp->work,
				   srcu_get_delay(sp));
	}
	spin_unlock_irqrestore(&sp->gp_lock, flags);
}
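
/*
 * Illustrative funnel walk (a sketch of the code above): the first
 * requester for grace period s finds no srcu_node with
 * ->srcu_have_cbs[idx] >= s, so it marks each level up to the root and
 * then starts the grace period itself.  A later requester for the same
 * s stops at its leaf, merely OR-ing its ->grpmask into
 * ->srcu_data_have_cbs[idx] so that srcu_gp_end() will schedule its
 * callbacks when the grace period completes.
 */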

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount + !srcu_get_delay(sp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several concurrent from-idle requests for a new
 * grace period to specify expediting, because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *sp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;

	/* If the local srcu_data structure has callbacks, not idle. */
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		local_irq_restore(flags);
		return false; /* Callbacks already present, so not idle. */
	}
	local_irq_restore(flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, sp->srcu_last_gp_end,
			       sp->srcu_last_gp_end + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&sp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
		 rcu_callback_t func, bool do_norm)
{
	unsigned long flags;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(sp);
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	spin_lock(&sdp->lock);
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	s = rcu_seq_snap(&sp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore(&sdp->lock, flags);
	if (needgp)
		srcu_funnel_gp_start(sp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(sp, sdp->mynode, s);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @sp: srcu_struct on which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(sp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
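
/*
 * Illustrative call_srcu() use (a sketch; struct foo, foo_reclaim(),
 * "old", and my_srcu are hypothetical):
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	call_srcu(&my_srcu, &old->rh, foo_reclaim);
 */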

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(sp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both ranks of indexes to drain to zero.
 * To avoid possible starvation of synchronize_srcu(), it first waits
 * for the count of index ((->srcu_idx & 1) ^ 1) to drain to zero,
 * then flips ->srcu_idx and waits for the count of the other index
 * to drain.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(sp);
	else
		__synchronize_srcu(sp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
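
/*
 * Illustrative update-side pattern (a sketch; gp, my_lock, my_srcu,
 * "old", and "new" are hypothetical):
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, new);
 *	synchronize_srcu(&my_srcu);
 *	kfree(old);
 */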

/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	sp = sdp->sp;
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);

	check_init_srcu_struct(sp);
	mutex_lock(&sp->srcu_barrier_mutex);
	if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&sp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&sp->srcu_barrier_seq);
	init_completion(&sp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&sp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued.  Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		spin_lock_irq(&sdp->lock);
		atomic_inc(&sp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head, 0)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&sp->srcu_barrier_cpu_cnt);
		}
		spin_unlock_irq(&sdp->lock);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
	wait_for_completion(&sp->srcu_barrier_completion);

	rcu_seq_end(&sp->srcu_barrier_seq);
	mutex_unlock(&sp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
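
/*
 * Illustrative teardown ordering (a sketch; my_srcu is hypothetical):
 * a user that has posted callbacks typically stops calling call_srcu(),
 * then waits for the callbacks before freeing the domain:
 *
 *	srcu_barrier(&my_srcu);
 *	cleanup_srcu_struct(&my_srcu);
 */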

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->srcu_idx;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *sp)
{
	int idx;

	mutex_lock(&sp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq(&sp->gp_lock);
		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
			spin_unlock_irq(&sp->gp_lock);
			mutex_unlock(&sp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(sp);
		spin_unlock_irq(&sp->gp_lock);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 1)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(sp);
		rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 2)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(sp); /* Releases ->srcu_gp_mutex. */
	}
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(work, struct srcu_data, work.work);
	sp = sdp->sp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq(&sdp->lock);
	smp_mb(); /* Old grace periods before callback invocation! */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq(&sdp->lock);
		return; /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	spin_unlock_irq(&sdp->lock);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	spin_lock_irq(&sdp->lock);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq(&sdp->lock);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
{
	bool pushgp = true;

	spin_lock_irq(&sp->gp_lock);
	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(sp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(sp);
	}
	spin_unlock_irq(&sp->gp_lock);

	if (pushgp)
		queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(sp);
	srcu_reschedule(sp, srcu_get_delay(sp));
}
EXPORT_SYMBOL_GPL(process_srcu);

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gpnum, unsigned long *completed)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*completed = rcu_seq_ctr(sp->srcu_gp_seq);
	*gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	return 0;
}
early_initcall(srcu_bootup_announce);