// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <paulmck@linux.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)						\
do {									\
	spin_lock(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)					\
do {									\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irq_rcu_node(p)					\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)				\
do {									\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irqrestore_rcu_node(p, flags)			\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)	\

/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  This function therefore
 * leaves ->srcu_lock_count[] and ->srcu_unlock_count[] untouched.
 */
static void init_srcu_struct_nodes(struct srcu_struct *ssp)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Initialize geometry if it has not already been initialized. */
	rcu_init_geometry();

	/* Work out the overall tree geometry. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
	}
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter tells us that ->sda has already been wired up to
 * srcu_data, in which case it is not allocated here.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	if (!ssp->sda)
		return -ENOMEM;
	init_srcu_struct_nodes(ssp);
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

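/*
 * Illustrative usage sketch (not part of this file, editorial addition):
 * an SRCU domain can be created either statically with DEFINE_SRCU() or
 * DEFINE_STATIC_SRCU(), or dynamically with init_srcu_struct().  The
 * my_srcu, my_dyn_srcu, and my_init() names below are hypothetical.
 *
 *	DEFINE_STATIC_SRCU(my_srcu);		// static domain, no init call needed
 *
 *	static struct srcu_struct my_dyn_srcu;	// dynamically initialized domain
 *
 *	static int __init my_init(void)
 *	{
 *		// Returns 0 on success, -ENOMEM if per-CPU allocation fails.
 *		return init_srcu_struct(&my_dyn_srcu);
 *	}
 */
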
/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted. Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side. In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between. This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}

/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL	1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
			 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&ssp->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p state: %d\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(ssp->sda);
	ssp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

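/*
 * Illustrative teardown sketch (not part of this file, editorial addition):
 * before invoking cleanup_srcu_struct(), the caller must stop posting new
 * callbacks and wait for the ones already posted, typically by way of
 * srcu_barrier().  The my_exit()/my_srcu names below are hypothetical.
 *
 *	static void __exit my_exit(void)
 *	{
 *		// No more call_srcu() invocations after this point.
 *		srcu_barrier(&my_srcu);		// wait for in-flight callbacks
 *		cleanup_srcu_struct(&my_srcu);	// now safe to tear down the domain
 *	}
 */
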
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

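/*
 * Illustrative reader-side sketch (not part of this file, editorial
 * addition): __srcu_read_lock() and __srcu_read_unlock() are normally
 * reached through the srcu_read_lock() and srcu_read_unlock() wrappers,
 * which also provide lockdep annotations.  The my_srcu/my_data names
 * below are hypothetical.
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_data, &my_srcu);	// fetch SRCU-protected pointer
 *	// ... use p; unlike plain RCU, this section may block ...
 *	srcu_read_unlock(&my_srcu, idx);
 */
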
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY	5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
	int state;

	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}


static void srcu_delay_timer(struct timer_list *t)
{
	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
{
	if (!delay) {
		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
		return;
	}

	timer_reduce(&sdp->delay_work, jiffies + delay);
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&ssp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(ssp);
	idx = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(ssp);
	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
	rcu_seq_end(&ssp->srcu_gp_seq);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
	spin_unlock_irq_rcu_node(ssp);
	mutex_unlock(&ssp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_irq_rcu_node(snp);
		cbs = false;
		last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
		if (last_lvl)
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check) && last_lvl)
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(ssp->sda, cpu);
				spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed_exp + 100))
					sdp->srcu_gp_seq_needed_exp = gpseq;
				spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&ssp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(ssp);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(ssp);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
							   ? SRCU_INTERVAL
							   : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(ssp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
		srcu_gp_start(ssp);
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &ssp->work,
					   srcu_get_delay(ssp));
		else if (list_empty(&ssp->work.work.entry))
			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if (--trycount + !srcu_get_delay(ssp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several current from-idle requests for a new
 * grace period to specify expediting because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;
	unsigned long tlast;

	check_init_srcu_struct(ssp);
	/* If the local srcu_data structure has callbacks, not idle. */
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irqrestore_rcu_node(sdp, flags);
		return false; /* Callbacks already present, so not idle. */
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	tlast = READ_ONCE(ssp->srcu_last_gp_end);
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, tlast, tlast + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Start an SRCU grace period, and also queue the callback if non-NULL.
 */
static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
					     struct rcu_head *rhp, bool do_norm)
{
	unsigned long flags;
	int idx;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(ssp);
	idx = srcu_read_lock(ssp);
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rhp)
		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(ssp, sdp->mynode, s);
	srcu_read_unlock(ssp, idx);
	return s;
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
			rcu_callback_t func, bool do_norm)
{
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	(void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct on which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);

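/*
 * Illustrative call_srcu() sketch (not part of this file, editorial
 * addition): the rcu_head is embedded in the protected structure and the
 * callback frees it once a grace period has elapsed.  The struct foo,
 * foo_cb(), old, and my_srcu names below are hypothetical.
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		// ... payload ...
 *	};
 *
 *	static void foo_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	// After unpublishing the pointer to the old struct foo:
 *	call_srcu(&my_srcu, &old->rh, foo_cb);
 */
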
/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(ssp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	__synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both indexes to drain to zero.  To avoid
 * possible starvation of synchronize_srcu(), it first waits for the count
 * of index ((->srcu_idx & 1) ^ 1) to drain to zero, then flips ->srcu_idx
 * and waits for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * Implementation of these memory-ordering guarantees is similar to
 * that of synchronize_rcu().
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(ssp);
	else
		__synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

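/*
 * Illustrative updater sketch (not part of this file, editorial addition):
 * a typical publish-then-wait update under hypothetical my_lock, my_data,
 * my_srcu, old, and new names.
 *
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(my_data, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_data, new);	// publish the new version
 *	spin_unlock(&my_lock);
 *	synchronize_srcu(&my_srcu);		// wait for pre-existing readers
 *	kfree(old);				// no reader can still hold the old version
 */
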
/**
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  It is the caller's responsibility
 * to make sure that grace period happens, for example, by invoking
 * call_srcu() after return from get_state_synchronize_srcu().
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
	// Any prior manipulation of SRCU-protected data must happen
	// before the load from ->srcu_gp_seq.
	smp_mb();
	return rcu_seq_snap(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);

/**
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),
 * this function also ensures that any needed SRCU grace period will be
 * started.  This convenience does come at a cost in terms of CPU overhead.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
	return srcu_gp_start_if_needed(ssp, NULL, true);
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);

/**
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 * @ssp: srcu_struct to provide cookie for.
 * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
 *
 * This function takes the cookie that was returned from either
 * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
 * returns @true if an SRCU grace period elapsed since the time that the
 * cookie was created.
 *
 * Because cookies are finite in size, wrapping/overflow is possible.
 * This is more pronounced on 32-bit systems where cookies are 32 bits,
 * where in theory wrapping could happen in about 14 hours assuming
 * 25-microsecond expedited SRCU grace periods.  However, a more likely
 * overflow lower bound is on the order of 24 days in the case of
 * one-millisecond SRCU grace periods.  Of course, wrapping in a 64-bit
 * system requires geologic timespans, as in more than seven million years
 * even for expedited SRCU grace periods.
 *
 * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems
 * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU.  This uses
 * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
 * few minutes.  If this proves to be a problem, this counter will be
 * expanded to the same size as for Tree SRCU.
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
	if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
		return false;
	// Ensure that the end of the SRCU grace period happens before
	// any subsequent code that the caller might execute.
	smp_mb(); // ^^^
	return true;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);

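/*
 * Illustrative polling sketch (not part of this file, editorial addition):
 * grab a cookie, make sure a grace period gets started, and later check
 * whether it has elapsed without blocking.  The my_srcu and my_cookie
 * names below are hypothetical.
 *
 *	unsigned long my_cookie;
 *
 *	my_cookie = start_poll_synchronize_srcu(&my_srcu);	// also starts a GP
 *	// ... do other work ...
 *	if (poll_state_synchronize_srcu(&my_srcu, my_cookie)) {
 *		// A full SRCU grace period has elapsed since the cookie
 *		// was taken, so pre-existing readers have all finished.
 *	}
 */
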
/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	ssp = sdp->ssp;
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @ssp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);

	check_init_srcu_struct(ssp);
	mutex_lock(&ssp->srcu_barrier_mutex);
	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&ssp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&ssp->srcu_barrier_seq);
	init_completion(&ssp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued.  Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_irq_rcu_node(sdp);
		atomic_inc(&ssp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&ssp->srcu_barrier_cpu_cnt);
		}
		spin_unlock_irq_rcu_node(sdp);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
	wait_for_completion(&ssp->srcu_barrier_completion);

	rcu_seq_end(&ssp->srcu_barrier_seq);
	mutex_unlock(&ssp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

/**
 * srcu_batches_completed - return batches completed.
 * @ssp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *ssp)
{
	return READ_ONCE(ssp->srcu_idx);
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

1168 | /* | |
da915ad5 PM |
1169 | * Core SRCU state machine. Push state bits of ->srcu_gp_seq |
1170 | * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has | |
1171 | * completed in that state. | |
dad81a20 | 1172 | */ |
aacb5d91 | 1173 | static void srcu_advance_state(struct srcu_struct *ssp) |
dad81a20 PM |
1174 | { |
1175 | int idx; | |
1176 | ||
aacb5d91 | 1177 | mutex_lock(&ssp->srcu_gp_mutex); |
da915ad5 | 1178 | |
dad81a20 PM |
1179 | /* |
1180 | * Because readers might be delayed for an extended period after | |
da915ad5 | 1181 | * fetching ->srcu_idx for their index, at any point in time there |
dad81a20 PM |
1182 | * might well be readers using both idx=0 and idx=1. We therefore |
1183 | * need to wait for readers to clear from both index values before | |
1184 | * invoking a callback. | |
1185 | * | |
1186 | * The load-acquire ensures that we see the accesses performed | |
1187 | * by the prior grace period. | |
1188 | */ | |
aacb5d91 | 1189 | idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */ |
dad81a20 | 1190 | if (idx == SRCU_STATE_IDLE) { |
aacb5d91 PM |
1191 | spin_lock_irq_rcu_node(ssp); |
1192 | if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { | |
1193 | WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq)); | |
1194 | spin_unlock_irq_rcu_node(ssp); | |
1195 | mutex_unlock(&ssp->srcu_gp_mutex); | |
dad81a20 PM |
1196 | return; |
1197 | } | |
aacb5d91 | 1198 | idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)); |
dad81a20 | 1199 | if (idx == SRCU_STATE_IDLE) |
aacb5d91 PM |
1200 | srcu_gp_start(ssp); |
1201 | spin_unlock_irq_rcu_node(ssp); | |
da915ad5 | 1202 | if (idx != SRCU_STATE_IDLE) { |
aacb5d91 | 1203 | mutex_unlock(&ssp->srcu_gp_mutex); |
dad81a20 | 1204 | return; /* Someone else started the grace period. */ |
da915ad5 | 1205 | } |
dad81a20 PM |
1206 | } |
1207 | ||
aacb5d91 PM |
1208 | if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { |
1209 | idx = 1 ^ (ssp->srcu_idx & 1); | |
1210 | if (!try_check_zero(ssp, idx, 1)) { | |
1211 | mutex_unlock(&ssp->srcu_gp_mutex); | |
dad81a20 | 1212 | return; /* readers present, retry later. */ |
da915ad5 | 1213 | } |
aacb5d91 | 1214 | srcu_flip(ssp); |
71042606 | 1215 | spin_lock_irq_rcu_node(ssp); |
aacb5d91 | 1216 | rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2); |
71042606 | 1217 | spin_unlock_irq_rcu_node(ssp); |
dad81a20 PM |
1218 | } |
1219 | ||
aacb5d91 | 1220 | if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { |
dad81a20 PM |
1221 | |
1222 | /* | |
1223 | * SRCU read-side critical sections are normally short, | |
1224 | * so check at least twice in quick succession after a flip. | |
1225 | */ | |
aacb5d91 PM |
1226 | idx = 1 ^ (ssp->srcu_idx & 1); |
1227 | if (!try_check_zero(ssp, idx, 2)) { | |
1228 | mutex_unlock(&ssp->srcu_gp_mutex); | |
da915ad5 PM |
1229 | return; /* readers present, retry later. */ |
1230 | } | |
aacb5d91 | 1231 | srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */ |
dad81a20 PM |
1232 | } |
1233 | } | |
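
The need to scan both index values can be seen from the reader side: the index sampled by srcu_read_lock() may be held across srcu_flip(), so its counter can be decremented arbitrarily late. A minimal reader sketch, assuming a hypothetical my_srcu, my_shared_ptr, and struct my_data:

	static void my_reader(void)
	{
		struct my_data *p;
		int idx;

		idx = srcu_read_lock(&my_srcu);			/* samples the current ->srcu_idx */
		p = srcu_dereference(my_shared_ptr, &my_srcu);	/* fetch the protected pointer */
		/*
		 * Sleepable, possibly very long, critical section using p;
		 * it may easily span an srcu_flip() on the updater side.
		 */
		srcu_read_unlock(&my_srcu, idx);		/* releases the idx sampled above */
	}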
1234 | ||
1235 | /* | |
1236 | * Invoke a limited number of SRCU callbacks that have passed through | |
1237 | * their grace period. If there are more to do, SRCU will reschedule | |
1238 | * the workqueue. Note that needed memory barriers have been executed | |
1239 | * in this task's context by srcu_readers_active_idx_check(). | |
1240 | */ | |
da915ad5 | 1241 | static void srcu_invoke_callbacks(struct work_struct *work) |
dad81a20 | 1242 | { |
ae5c2341 | 1243 | long len; |
da915ad5 | 1244 | bool more; |
dad81a20 PM |
1245 | struct rcu_cblist ready_cbs; |
1246 | struct rcu_head *rhp; | |
da915ad5 | 1247 | struct srcu_data *sdp; |
aacb5d91 | 1248 | struct srcu_struct *ssp; |
dad81a20 | 1249 | |
e81baf4c SAS |
1250 | sdp = container_of(work, struct srcu_data, work); |
1251 | ||
aacb5d91 | 1252 | ssp = sdp->ssp; |
dad81a20 | 1253 | rcu_cblist_init(&ready_cbs); |
d6331980 | 1254 | spin_lock_irq_rcu_node(sdp); |
da915ad5 | 1255 | rcu_segcblist_advance(&sdp->srcu_cblist, |
aacb5d91 | 1256 | rcu_seq_current(&ssp->srcu_gp_seq)); |
da915ad5 PM |
1257 | if (sdp->srcu_cblist_invoking || |
1258 | !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) { | |
d6331980 | 1259 | spin_unlock_irq_rcu_node(sdp); |
da915ad5 PM |
1260 | return; /* Someone else on the job or nothing to do. */ |
1261 | } | |
1262 | ||
1263 | /* We are on the job! Extract and invoke ready callbacks. */ | |
1264 | sdp->srcu_cblist_invoking = true; | |
1265 | rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs); | |
ae5c2341 | 1266 | len = ready_cbs.len; |
d6331980 | 1267 | spin_unlock_irq_rcu_node(sdp); |
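/* Each callback below runs under local_bh_disable(), so it must not sleep. */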
dad81a20 PM |
1268 | rhp = rcu_cblist_dequeue(&ready_cbs); |
1269 | for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) { | |
a602538e | 1270 | debug_rcu_head_unqueue(rhp); |
dad81a20 PM |
1271 | local_bh_disable(); |
1272 | rhp->func(rhp); | |
1273 | local_bh_enable(); | |
1274 | } | |
ae5c2341 | 1275 | WARN_ON_ONCE(ready_cbs.len); |
da915ad5 PM |
1276 | |
1277 | /* | |
1278 | * Update counts, accelerate new callbacks, and if needed, | |
1279 | * schedule another round of callback invocation. | |
1280 | */ | |
d6331980 | 1281 | spin_lock_irq_rcu_node(sdp); |
ae5c2341 | 1282 | rcu_segcblist_add_len(&sdp->srcu_cblist, -len); |
da915ad5 | 1283 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, |
aacb5d91 | 1284 | rcu_seq_snap(&ssp->srcu_gp_seq)); |
da915ad5 PM |
1285 | sdp->srcu_cblist_invoking = false; |
1286 | more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist); | |
d6331980 | 1287 | spin_unlock_irq_rcu_node(sdp); |
da915ad5 PM |
1288 | if (more) |
1289 | srcu_schedule_cbs_sdp(sdp, 0); | |
dad81a20 PM |
1290 | } |
1291 | ||
1292 | /* | |
1293 | * Finished one round of SRCU grace-period processing. Start another if there are | |
1294 | * more SRCU callbacks queued, otherwise put SRCU into not-running state. | |
1295 | */ | |
aacb5d91 | 1296 | static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay) |
dad81a20 | 1297 | { |
da915ad5 | 1298 | bool pushgp = true; |
dad81a20 | 1299 | |
aacb5d91 PM |
1300 | spin_lock_irq_rcu_node(ssp); |
1301 | if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { | |
1302 | if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) { | |
da915ad5 PM |
1303 | /* All requests fulfilled, time to go idle. */ |
1304 | pushgp = false; | |
1305 | } | |
aacb5d91 | 1306 | } else if (!rcu_seq_state(ssp->srcu_gp_seq)) { |
da915ad5 | 1307 | /* Outstanding request and no GP. Start one. */ |
aacb5d91 | 1308 | srcu_gp_start(ssp); |
dad81a20 | 1309 | } |
aacb5d91 | 1310 | spin_unlock_irq_rcu_node(ssp); |
dad81a20 | 1311 | |
da915ad5 | 1312 | if (pushgp) |
aacb5d91 | 1313 | queue_delayed_work(rcu_gp_wq, &ssp->work, delay); |
dad81a20 PM |
1314 | } |
1315 | ||
1316 | /* | |
1317 | * This is the work-queue function that handles SRCU grace periods. | |
1318 | */ | |
0d8a1e83 | 1319 | static void process_srcu(struct work_struct *work) |
dad81a20 | 1320 | { |
aacb5d91 | 1321 | struct srcu_struct *ssp; |
dad81a20 | 1322 | |
aacb5d91 | 1323 | ssp = container_of(work, struct srcu_struct, work.work); |
dad81a20 | 1324 | |
aacb5d91 PM |
1325 | srcu_advance_state(ssp); |
1326 | srcu_reschedule(ssp, srcu_get_delay(ssp)); | |
dad81a20 | 1327 | } |
7f6733c3 PM |
1328 | |
1329 | void srcutorture_get_gp_data(enum rcutorture_type test_type, | |
aacb5d91 | 1330 | struct srcu_struct *ssp, int *flags, |
aebc8264 | 1331 | unsigned long *gp_seq) |
7f6733c3 PM |
1332 | { |
1333 | if (test_type != SRCU_FLAVOR) | |
1334 | return; | |
1335 | *flags = 0; | |
aacb5d91 | 1336 | *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq); |
7f6733c3 PM |
1337 | } |
1338 | EXPORT_SYMBOL_GPL(srcutorture_get_gp_data); | |
1f4f6da1 | 1339 | |
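/*
 * Print per-CPU SRCU reader-count diagnostics.  Each per-CPU entry has the
 * form "cpu(c0,c1 C)", where c0 and c1 are srcu_read_lock() minus
 * srcu_read_unlock() counts for the inactive and active index, and the
 * trailing character is 'C' when that CPU still has callbacks queued
 * ('.' otherwise).  The final "T(s0,s1)" entry gives the two totals.
 */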
aacb5d91 | 1340 | void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf) |
115a1a52 PM |
1341 | { |
1342 | int cpu; | |
1343 | int idx; | |
ac3748c6 | 1344 | unsigned long s0 = 0, s1 = 0; |
115a1a52 | 1345 | |
aacb5d91 | 1346 | idx = ssp->srcu_idx & 0x1; |
52e17ba1 | 1347 | pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):", |
aacb5d91 | 1348 | tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx); |
115a1a52 PM |
1349 | for_each_possible_cpu(cpu) { |
1350 | unsigned long l0, l1; | |
1351 | unsigned long u0, u1; | |
1352 | long c0, c1; | |
5ab07a8d | 1353 | struct srcu_data *sdp; |
115a1a52 | 1354 | |
aacb5d91 | 1355 | sdp = per_cpu_ptr(ssp->sda, cpu); |
b68c6146 PM |
1356 | u0 = data_race(sdp->srcu_unlock_count[!idx]); |
1357 | u1 = data_race(sdp->srcu_unlock_count[idx]); | |
115a1a52 PM |
1358 | |
1359 | /* | |
1360 | * Make sure that a lock is always counted if the corresponding | |
1361 | * unlock is counted. | |
1362 | */ | |
1363 | smp_rmb(); | |
1364 | ||
b68c6146 PM |
1365 | l0 = data_race(sdp->srcu_lock_count[!idx]); |
1366 | l1 = data_race(sdp->srcu_lock_count[idx]); | |
115a1a52 PM |
1367 | |
1368 | c0 = l0 - u0; | |
1369 | c1 = l1 - u1; | |
7e210a65 PM |
1370 | pr_cont(" %d(%ld,%ld %c)", |
1371 | cpu, c0, c1, | |
1372 | "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]); | |
ac3748c6 PM |
1373 | s0 += c0; |
1374 | s1 += c1; | |
115a1a52 | 1375 | } |
ac3748c6 | 1376 | pr_cont(" T(%ld,%ld)\n", s0, s1); |
115a1a52 PM |
1377 | } |
1378 | EXPORT_SYMBOL_GPL(srcu_torture_stats_print); | |
1379 | ||
1f4f6da1 PM |
1380 | static int __init srcu_bootup_announce(void) |
1381 | { | |
1382 | pr_info("Hierarchical SRCU implementation.\n"); | |
0c8e0e3c PM |
1383 | if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF) |
1384 | pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff); | |
1f4f6da1 PM |
1385 | return 0; |
1386 | } | |
1387 | early_initcall(srcu_bootup_announce); | |
e0fcba9a PM |
1388 | |
1389 | void __init srcu_init(void) | |
1390 | { | |
aacb5d91 | 1391 | struct srcu_struct *ssp; |
e0fcba9a | 1392 | |
8e9c01c7 FW |
1393 | /* |
1394 | * Once srcu_init_done is set, call_srcu() can follow the normal path and | |
1395 | * queue delayed work. This must follow creation of the RCU workqueues | |
1396 | * and timer initialization. | |
1397 | */ | |
e0fcba9a PM |
1398 | srcu_init_done = true; |
1399 | while (!list_empty(&srcu_boot_list)) { | |
aacb5d91 | 1400 | ssp = list_first_entry(&srcu_boot_list, struct srcu_struct, |
4e6ea4ef | 1401 | work.work.entry); |
aacb5d91 PM |
1402 | list_del_init(&ssp->work.work.entry); |
1403 | queue_work(rcu_gp_wq, &ssp->work.work); | |
e0fcba9a PM |
1404 | } |
1405 | } | |
fe15b50c PM |
1406 | |
1407 | #ifdef CONFIG_MODULES | |
1408 | ||
1409 | /* Initialize any global-scope srcu_struct structures used by this module. */ | |
1410 | static int srcu_module_coming(struct module *mod) | |
1411 | { | |
1412 | int i; | |
1413 | struct srcu_struct **sspp = mod->srcu_struct_ptrs; | |
1414 | int ret; | |
1415 | ||
1416 | for (i = 0; i < mod->num_srcu_structs; i++) { | |
1417 | ret = init_srcu_struct(*(sspp++)); | |
1418 | if (WARN_ON_ONCE(ret)) | |
1419 | return ret; | |
1420 | } | |
1421 | return 0; | |
1422 | } | |
1423 | ||
1424 | /* Clean up any global-scope srcu_struct structures used by this module. */ | |
1425 | static void srcu_module_going(struct module *mod) | |
1426 | { | |
1427 | int i; | |
1428 | struct srcu_struct **sspp = mod->srcu_struct_ptrs; | |
1429 | ||
1430 | for (i = 0; i < mod->num_srcu_structs; i++) | |
1431 | cleanup_srcu_struct(*(sspp++)); | |
1432 | } | |
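
For reference, the kind of module these hooks serve declares its srcu_struct at global scope. A minimal sketch follows, with all names hypothetical, and assuming that DEFINE_STATIC_SRCU() in a module is what populates the srcu_struct_ptrs[] array walked above:

	/* Hypothetical module using a file-scope SRCU domain. */
	DEFINE_STATIC_SRCU(my_mod_srcu);

	static int my_mod_read_something(void)
	{
		int idx = srcu_read_lock(&my_mod_srcu);

		/* ... sleepable read-side critical section ... */
		srcu_read_unlock(&my_mod_srcu, idx);
		return 0;
	}

	/*
	 * No explicit init_srcu_struct()/cleanup_srcu_struct() calls appear in
	 * the module: srcu_module_coming() and srcu_module_going() above handle
	 * both on MODULE_STATE_COMING and MODULE_STATE_GOING.
	 */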
1433 | ||
1434 | /* Handle one module, either coming or going. */ | |
1435 | static int srcu_module_notify(struct notifier_block *self, | |
1436 | unsigned long val, void *data) | |
1437 | { | |
1438 | struct module *mod = data; | |
1439 | int ret = 0; | |
1440 | ||
1441 | switch (val) { | |
1442 | case MODULE_STATE_COMING: | |
1443 | ret = srcu_module_coming(mod); | |
1444 | break; | |
1445 | case MODULE_STATE_GOING: | |
1446 | srcu_module_going(mod); | |
1447 | break; | |
1448 | default: | |
1449 | break; | |
1450 | } | |
1451 | return ret; | |
1452 | } | |
1453 | ||
1454 | static struct notifier_block srcu_module_nb = { | |
1455 | .notifier_call = srcu_module_notify, | |
1456 | .priority = 0, | |
1457 | }; | |
1458 | ||
1459 | static __init int init_srcu_module_notifier(void) | |
1460 | { | |
1461 | int ret; | |
1462 | ||
1463 | ret = register_module_notifier(&srcu_module_nb); | |
1464 | if (ret) | |
1465 | pr_warn("Failed to register srcu module notifier\n"); | |
1466 | return ret; | |
1467 | } | |
1468 | late_initcall(init_srcu_module_notifier); | |
1469 | ||
1470 | #endif /* #ifdef CONFIG_MODULES */ |