/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);

/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	sp->level[0] = &sp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	rcu_for_each_node_breadth_first(sp, snp) {
		raw_spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &sp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == sp->level[level + 1])
			level++;
		snp->srcu_parent = sp->level[level - 1] +
				   (snp - sp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = sp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		raw_spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
		sdp->sp = sp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
{
	mutex_init(&sp->srcu_cb_mutex);
	mutex_init(&sp->srcu_gp_mutex);
	sp->srcu_idx = 0;
	sp->srcu_gp_seq = 0;
	sp->srcu_barrier_seq = 0;
	mutex_init(&sp->srcu_barrier_mutex);
	atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	if (!is_static)
		sp->sda = alloc_percpu(struct srcu_data);
	init_srcu_struct_nodes(sp, is_static);
	sp->srcu_gp_seq_needed_exp = 0;
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
	return sp->sda ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

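/*
 * Illustrative usage sketch (not part of Tree SRCU itself): the two
 * common ways of setting up an SRCU domain.  The names my_srcu and
 * my_init() below are hypothetical.
 *
 *	DEFINE_SRCU(my_srcu);			// file-scope definition
 *
 * or, when the srcu_struct must be initialized at run time:
 *
 *	static struct srcu_struct my_srcu;
 *
 *	static int __init my_init(void)
 *	{
 *		return init_srcu_struct(&my_srcu);	// 0 or -ENOMEM
 *	}
 *
 * A dynamically initialized srcu_struct must eventually be passed to
 * cleanup_srcu_struct(), otherwise the per-CPU srcu_data array leaks.
 */
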
/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use sp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *sp)
{
	unsigned long flags;

	WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	raw_spin_lock_irqsave_rcu_node(sp, flags);
	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
		raw_spin_unlock_irqrestore_rcu_node(sp, flags);
		return;
	}
	init_srcu_struct_fields(sp, true);
	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(sp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between.  This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(sp, idx) == unlocks;
}

/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       if there are not.
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL		1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *sp)
{
	if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
			 READ_ONCE(sp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(sp)))
		return; /* Leakage unless caller handles error. */
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Leakage unless caller handles error. */
	flush_delayed_work(&sp->work);
	for_each_possible_cpu(cpu)
		flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(sp))) {
		pr_info("cleanup_srcu_struct: Active srcu_struct %p state: %d\n", sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(sp->sda);
	sp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = READ_ONCE(sp->srcu_idx) & 0x1;
	this_cpu_inc(sp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

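/*
 * Illustrative reader-side sketch (not part of Tree SRCU itself),
 * assuming a hypothetical SRCU domain my_srcu and a hypothetical
 * SRCU-protected pointer my_ptr:
 *
 *	int idx;
 *	struct my_obj *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_ptr, &my_srcu);
 *	// ... use *p, possibly blocking ...
 *	srcu_read_unlock(&my_srcu, idx);
 *
 * The index returned by srcu_read_lock() names the rank of
 * ->srcu_lock_count[]/->srcu_unlock_count[] incremented above, and must
 * be passed to the matching srcu_read_unlock(), which may run on a
 * different CPU.
 */
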
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY		5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *sp)
{
	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
	int state;

	lockdep_assert_held(&sp->lock);
	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&sp->srcu_gp_seq);
	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

/*
 * Track online CPUs to guide callback workqueue placement.
 */
DEFINE_PER_CPU(bool, srcu_online);

void srcu_online_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
}

void srcu_offline_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
}

/*
 * Place the workqueue handler on the specified CPU if online, otherwise
 * just run it wherever.  This is useful for placing workqueue handlers
 * that are to invoke the specified CPU's callbacks.
 */
static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				       struct delayed_work *dwork,
				       unsigned long delay)
{
	bool ret;

	preempt_disable();
	if (READ_ONCE(per_cpu(srcu_online, cpu)))
		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
	else
		ret = queue_delayed_work(wq, dwork, delay);
	preempt_enable();
	return ret;
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp->cpu, system_power_efficient_wq,
				   &sdp->work, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *sp)
{
	unsigned long cbdelay;
	bool cbs;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	int idxnext;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&sp->srcu_cb_mutex);

	/* End the current grace period. */
	raw_spin_lock_irq_rcu_node(sp);
	idx = rcu_seq_state(sp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(sp);
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	rcu_seq_end(&sp->srcu_gp_seq);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
		sp->srcu_gp_seq_needed_exp = gpseq;
	raw_spin_unlock_irq_rcu_node(sp);
	mutex_unlock(&sp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
	rcu_for_each_node_breadth_first(sp, snp) {
		raw_spin_lock_irq_rcu_node(snp);
		cbs = false;
		if (snp >= sp->level[rcu_num_lvls - 1])
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			snp->srcu_gp_seq_needed_exp = gpseq;
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		raw_spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check))
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(sp->sda, cpu);
				raw_spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&sp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	raw_spin_lock_irq_rcu_node(sp);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
		srcu_gp_start(sp);
		raw_spin_unlock_irq_rcu_node(sp);
		/* Throttle expedited grace periods: Should be rare! */
		srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
				    ? 0 : SRCU_INTERVAL);
	} else {
		raw_spin_unlock_irq_rcu_node(sp);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		raw_spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			raw_spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		raw_spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	raw_spin_lock_irqsave_rcu_node(sp, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;
	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 */
static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		raw_spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			raw_spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
							   ? SRCU_INTERVAL
							   : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(sp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			snp->srcu_gp_seq_needed_exp = s;
		raw_spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	raw_spin_lock_irqsave_rcu_node(sp, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
		srcu_gp_start(sp);
		queue_delayed_work(system_power_efficient_wq, &sp->work,
				   srcu_get_delay(sp));
	}
	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount + !srcu_get_delay(sp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several current from-idle requests for a new
 * grace period from idle to specify expediting because they will all end
 * up requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *sp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;

	/* If the local srcu_data structure has callbacks, not idle. */
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		local_irq_restore(flags);
		return false; /* Callbacks already present, so not idle. */
	}
	local_irq_restore(flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, sp->srcu_last_gp_end,
			       sp->srcu_last_gp_end + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&sp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_rcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_rcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_rcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
		 rcu_callback_t func, bool do_norm)
{
	unsigned long flags;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(sp);
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	raw_spin_lock_rcu_node(sdp);
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	s = rcu_seq_snap(&sp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(sp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(sp, sdp->mynode, s);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @sp: srcu_struct in which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(sp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);

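/*
 * Illustrative call_srcu() sketch (not part of Tree SRCU itself).  The
 * names struct my_obj, my_free_cb(), and my_srcu are hypothetical; the
 * rcu_head must be embedded in the object being freed:
 *
 *	struct my_obj {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void my_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_obj, rh));
 *	}
 *
 *	// after removing obj from all SRCU-protected structures:
 *	call_srcu(&my_srcu, &obj->rh, my_free_cb);
 */
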
/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(sp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the count to drain to zero of both indexes.  To avoid the
 * possible starvation of synchronize_srcu(), it waits for the count of
 * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first,
 * and then flip the srcu_idx and wait for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU-sched read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(sp);
	else
		__synchronize_srcu(sp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

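/*
 * Illustrative updater sketch (not part of Tree SRCU itself), assuming
 * the same hypothetical my_srcu domain and my_ptr pointer as the
 * reader-side sketch above, with a hypothetical my_lock protecting
 * updates:
 *
 *	old = rcu_dereference_protected(my_ptr, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_ptr, new);
 *	synchronize_srcu(&my_srcu);	// wait for pre-existing readers
 *	kfree(old);
 *
 * Readers that fetched the old pointer before rcu_assign_pointer() may
 * still be using *old; synchronize_srcu() does not return until all such
 * pre-existing readers have exited their read-side critical sections.
 */
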
/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	sp = sdp->sp;
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);

	check_init_srcu_struct(sp);
	mutex_lock(&sp->srcu_barrier_mutex);
	if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&sp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&sp->srcu_barrier_seq);
	init_completion(&sp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&sp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued.  Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		raw_spin_lock_irq_rcu_node(sdp);
		atomic_inc(&sp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head, 0)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&sp->srcu_barrier_cpu_cnt);
		}
		raw_spin_unlock_irq_rcu_node(sdp);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
	wait_for_completion(&sp->srcu_barrier_completion);

	rcu_seq_end(&sp->srcu_barrier_seq);
	mutex_unlock(&sp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

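/*
 * Illustrative teardown sketch (not part of Tree SRCU itself): a module
 * that posts callbacks with call_srcu() on the hypothetical my_srcu
 * domain must wait for them before the domain (and the module text
 * containing the callback function) goes away:
 *
 *	static void __exit my_exit(void)
 *	{
 *		// first stop posting new call_srcu() callbacks
 *		srcu_barrier(&my_srcu);		// wait for in-flight callbacks
 *		cleanup_srcu_struct(&my_srcu);	// then release per-CPU state
 *	}
 */
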
/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->srcu_idx;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *sp)
{
	int idx;

	mutex_lock(&sp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		raw_spin_lock_irq_rcu_node(sp);
		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
			raw_spin_unlock_irq_rcu_node(sp);
			mutex_unlock(&sp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(sp);
		raw_spin_unlock_irq_rcu_node(sp);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 1)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(sp);
		rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 2)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(sp);  /* Releases ->srcu_gp_mutex. */
	}
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(work, struct srcu_data, work.work);
	sp = sdp->sp;
	rcu_cblist_init(&ready_cbs);
	raw_spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		raw_spin_unlock_irq_rcu_node(sdp);
		return;  /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	raw_spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	raw_spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	raw_spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
{
	bool pushgp = true;

	raw_spin_lock_irq_rcu_node(sp);
	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(sp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(sp);
	}
	raw_spin_unlock_irq_rcu_node(sp);

	if (pushgp)
		queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(sp);
	srcu_reschedule(sp, srcu_get_delay(sp));
}
EXPORT_SYMBOL_GPL(process_srcu);

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gpnum, unsigned long *completed)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*completed = rcu_seq_ctr(sp->srcu_gp_seq);
	*gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
{
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;

	idx = sp->srcu_idx & 0x1;
	pr_alert("%s%s Tree SRCU per-CPU(idx=%d):", tt, tf, idx);
	for_each_possible_cpu(cpu) {
		unsigned long l0, l1;
		unsigned long u0, u1;
		long c0, c1;
		struct srcu_data *counts;

		counts = per_cpu_ptr(sp->sda, cpu);
		u0 = counts->srcu_unlock_count[!idx];
		u1 = counts->srcu_unlock_count[idx];

		/*
		 * Make sure that a lock is always counted if the corresponding
		 * unlock is counted.
		 */
		smp_rmb();

		l0 = counts->srcu_lock_count[!idx];
		l1 = counts->srcu_lock_count[idx];

		c0 = l0 - u0;
		c1 = l1 - u1;
		pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
		s0 += c0;
		s1 += c1;
	}
	pr_cont(" T(%ld,%ld)\n", s0, s1);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);

static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	return 0;
}
early_initcall(srcu_bootup_announce);