/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 *	   Frederic Weisbecker <frederic@kernel.org>
 */
#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
{
	return lockdep_is_held(&rdp->nocb_lock);
}
static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
{
	/* Race on early boot between thread creation and assignment */
	if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread)
		return true;

	if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread)
		return true;

	return false;
}
/*
 * Offload callback processing from the boot-time-specified set of CPUs
 * specified by rcu_nocb_mask.  For the CPUs in the set, there are kthreads
 * created that pull the callbacks from the corresponding CPU, wait for
 * a grace period to elapse, and invoke the callbacks.  These kthreads
 * are organized into GP kthreads, which manage incoming callbacks, wait for
 * grace periods, and awaken CB kthreads, and the CB kthreads, which only
 * invoke callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
 * do a wake_up() on their GP kthread when they insert a callback into any
 * empty list, unless the rcu_nocb_poll boot parameter has been specified,
 * in which case each kthread actively polls its CPU.  (Which isn't so great
 * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
 *
 * This is intended to be used in conjunction with Frederic Weisbecker's
 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
 * running CPU-bound user-mode computations.
 *
 * Offloading of callbacks can also be used as an energy-efficiency
 * measure because CPUs with no RCU callbacks queued are more aggressive
 * about entering dyntick-idle mode.
 */
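
/*
 * The grace-period kthreads created below are named "rcuog/<CPU>" and the
 * per-CPU callback-invocation kthreads "rcuo%c/<CPU>" (with %c taken from
 * rcu_state.abbr); see rcu_spawn_one_nocb_kthread() further down.
 */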
/*
 * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
 * If the list is invalid, a warning is emitted and all CPUs are offloaded.
 */
static int __init rcu_nocb_setup(char *str)
{
	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
	if (cpulist_parse(str, rcu_nocb_mask)) {
		pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
		cpumask_setall(rcu_nocb_mask);
	}
	return 1;
}
__setup("rcu_nocbs=", rcu_nocb_setup);
static int __init parse_rcu_nocb_poll(char *arg)
{
	rcu_nocb_poll = true;
	return 0;
}
early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
/*
 * Don't bother bypassing ->cblist if the call_rcu() rate is low.
 * After all, the main point of bypassing is to avoid lock contention
 * on ->nocb_lock, which can only happen at high call_rcu() rates.
 */
static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
module_param(nocb_nobypass_lim_per_jiffy, int, 0);
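
/*
 * With the default value of 16 * 1000 / HZ, a CPU may do 16 direct
 * ->cblist enqueues per jiffy when HZ=1000 (and 64 per jiffy when HZ=250)
 * before further call_rcu() invocations start using ->nocb_bypass.
 */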
/*
 * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
 * lock isn't immediately available, increment ->nocb_lock_contended to
 * flag the contention.
 */
static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
	__acquires(&rdp->nocb_bypass_lock)
{
	lockdep_assert_irqs_disabled();
	if (raw_spin_trylock(&rdp->nocb_bypass_lock))
		return;
	atomic_inc(&rdp->nocb_lock_contended);
	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
	smp_mb__after_atomic(); /* atomic_inc() before lock. */
	raw_spin_lock(&rdp->nocb_bypass_lock);
	smp_mb__before_atomic(); /* atomic_dec() after lock. */
	atomic_dec(&rdp->nocb_lock_contended);
}
/*
 * Spinwait until the specified rcu_data structure's ->nocb_lock is
 * not contended.  Please note that this is extremely special-purpose,
 * relying on the fact that at most two kthreads and one CPU contend for
 * this lock, and also that the two kthreads are guaranteed to have frequent
 * grace-period-duration time intervals between successive acquisitions
 * of the lock.  This allows us to use an extremely simple throttling
 * mechanism, and further to apply it only to the CPU doing floods of
 * call_rcu() invocations.  Don't try this at home!
 */
static void rcu_nocb_wait_contended(struct rcu_data *rdp)
{
	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
	while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
		cpu_relax();
}
/*
 * Conditionally acquire the specified rcu_data structure's
 * ->nocb_bypass_lock.
 */
static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	return raw_spin_trylock(&rdp->nocb_bypass_lock);
}
/*
 * Release the specified rcu_data structure's ->nocb_bypass_lock.
 */
static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
	__releases(&rdp->nocb_bypass_lock)
{
	lockdep_assert_irqs_disabled();
	raw_spin_unlock(&rdp->nocb_bypass_lock);
}
/*
 * Acquire the specified rcu_data structure's ->nocb_lock, but only
 * if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_lock(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	if (!rcu_rdp_is_offloaded(rdp))
		return;
	raw_spin_lock(&rdp->nocb_lock);
}
/*
 * Release the specified rcu_data structure's ->nocb_lock, but only
 * if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_unlock(struct rcu_data *rdp)
{
	if (rcu_rdp_is_offloaded(rdp)) {
		lockdep_assert_irqs_disabled();
		raw_spin_unlock(&rdp->nocb_lock);
	}
}
/*
 * Release the specified rcu_data structure's ->nocb_lock and restore
 * interrupts, but only if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags)
{
	if (rcu_rdp_is_offloaded(rdp)) {
		lockdep_assert_irqs_disabled();
		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
	} else {
		local_irq_restore(flags);
	}
}
/* Lockdep check that ->cblist may be safely accessed. */
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	if (rcu_rdp_is_offloaded(rdp))
		lockdep_assert_held(&rdp->nocb_lock);
}
/*
 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
 * grace period.
 */
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
	swake_up_all(sq);
}
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
	return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
}
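
/*
 * Note that there are two nocb_gp_wq[] wait queues per rcu_node structure
 * and that the low-order bit of the grace-period sequence counter selects
 * between them, so that waits on successive grace periods use alternating
 * queues.
 */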
static void rcu_init_one_nocb(struct rcu_node *rnp)
{
	init_swait_queue_head(&rnp->nocb_gp_wq[0]);
	init_swait_queue_head(&rnp->nocb_gp_wq[1]);
}
/* Is the specified CPU a no-CBs CPU? */
bool rcu_is_nocb_cpu(int cpu)
{
	if (cpumask_available(rcu_nocb_mask))
		return cpumask_test_cpu(cpu, rcu_nocb_mask);
	return false;
}
static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
			   struct rcu_data *rdp,
			   bool force, unsigned long flags)
	__releases(rdp_gp->nocb_gp_lock)
{
	bool needwake = false;

	if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
		raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
				    TPS("AlreadyAwake"));
		return false;
	}

	if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
		del_timer(&rdp_gp->nocb_timer);
	}

	if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
		WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
		needwake = true;
	}
	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
	if (needwake) {
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
		wake_up_process(rdp_gp->nocb_gp_kthread);
	}

	return needwake;
}
/*
 * Kick the GP kthread for this NOCB group.
 */
static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	return __wake_nocb_gp(rdp_gp, rdp, force, flags);
}
/*
 * Arrange to wake the GP kthread for this NOCB group at some future
 * time when it is safe to do so.
 */
static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
			       const char *reason)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);

	/*
	 * Bypass wakeup overrides previous deferments. In case
	 * of callback storm, no need to wake up too early.
	 */
	if (waketype == RCU_NOCB_WAKE_BYPASS) {
		mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
	} else {
		if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
			mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
		if (rdp_gp->nocb_defer_wakeup < waketype)
			WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
	}

	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);

	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
}
/*
 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
 * However, if there is a callback to be enqueued and if ->nocb_bypass
 * proves to be initially empty, just return false because the no-CB GP
 * kthread may need to be awakened in this case.
 *
 * Note that this function always returns true if rhp is NULL.
 */
static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				     unsigned long j)
{
	struct rcu_cblist rcl;

	WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
	rcu_lockdep_assert_cblist_protected(rdp);
	lockdep_assert_held(&rdp->nocb_bypass_lock);
	if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
		raw_spin_unlock(&rdp->nocb_bypass_lock);
		return false;
	}
	/* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
	if (rhp)
		rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
	rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
	WRITE_ONCE(rdp->nocb_bypass_first, j);
	rcu_nocb_bypass_unlock(rdp);
	return true;
}
/*
 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
 * However, if there is a callback to be enqueued and if ->nocb_bypass
 * proves to be initially empty, just return false because the no-CB GP
 * kthread may need to be awakened in this case.
 *
 * Note that this function always returns true if rhp is NULL.
 */
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j)
{
	if (!rcu_rdp_is_offloaded(rdp))
		return true;
	rcu_lockdep_assert_cblist_protected(rdp);
	rcu_nocb_bypass_lock(rdp);
	return rcu_nocb_do_flush_bypass(rdp, rhp, j);
}
/*
 * If the ->nocb_bypass_lock is immediately available, flush the
 * ->nocb_bypass queue into ->cblist.
 */
static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
{
	rcu_lockdep_assert_cblist_protected(rdp);
	if (!rcu_rdp_is_offloaded(rdp) ||
	    !rcu_nocb_bypass_trylock(rdp))
		return;
	WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j));
}
/*
 * See whether it is appropriate to use the ->nocb_bypass list in order
 * to control contention on ->nocb_lock.  A limited number of direct
 * enqueues are permitted into ->cblist per jiffy.  If ->nocb_bypass
 * is non-empty, further callbacks must be placed into ->nocb_bypass,
 * otherwise rcu_barrier() breaks.  Use rcu_nocb_flush_bypass() to switch
 * back to direct use of ->cblist.  However, ->nocb_bypass should not be
 * used if ->cblist is empty, because otherwise callbacks can be stranded
 * on ->nocb_bypass because we cannot count on the current CPU ever again
 * invoking call_rcu().  The general rule is that if ->nocb_bypass is
 * non-empty, the corresponding no-CBs grace-period kthread must not be
 * in an indefinite sleep state.
 *
 * Finally, it is not permitted to use the bypass during early boot,
 * as doing so would confuse the auto-initialization code.  Besides
 * which, there is no point in worrying about lock contention while
 * there is only one CPU in operation.
 */
static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				bool *was_alldone, unsigned long flags)
{
	unsigned long c;
	unsigned long cur_gp_seq;
	unsigned long j = jiffies;
	long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);

	lockdep_assert_irqs_disabled();

	// Pure softirq/rcuc based processing: no bypassing, no
	// locking.
	if (!rcu_rdp_is_offloaded(rdp)) {
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		return false;
	}

	// In the process of (de-)offloading: no bypassing, but
	// locking.
	if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
		rcu_nocb_lock(rdp);
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		return false; /* Not offloaded, no bypassing. */
	}

	// Don't use ->nocb_bypass during early boot.
	if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
		rcu_nocb_lock(rdp);
		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		return false;
	}

	// If we have advanced to a new jiffy, reset counts to allow
	// moving back from ->nocb_bypass to ->cblist.
	if (j == rdp->nocb_nobypass_last) {
		c = rdp->nocb_nobypass_count + 1;
	} else {
		WRITE_ONCE(rdp->nocb_nobypass_last, j);
		c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
		if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
				 nocb_nobypass_lim_per_jiffy))
			c = 0;
		else if (c > nocb_nobypass_lim_per_jiffy)
			c = nocb_nobypass_lim_per_jiffy;
	}
	WRITE_ONCE(rdp->nocb_nobypass_count, c);

	// If there hasn't yet been all that many ->cblist enqueues
	// this jiffy, tell the caller to enqueue onto ->cblist.  But flush
	// ->nocb_bypass first.
	if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) {
		rcu_nocb_lock(rdp);
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		if (*was_alldone)
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("FirstQ"));
		WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j));
		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
		return false; // Caller must enqueue the callback.
	}

	// If ->nocb_bypass has been used too long or is too full,
	// flush ->nocb_bypass to ->cblist.
	if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
	    ncbs >= qhimark) {
		rcu_nocb_lock(rdp);
		if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
			*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
			if (*was_alldone)
				trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
						    TPS("FirstQ"));
			WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
			return false; // Caller must enqueue the callback.
		}
		if (j != rdp->nocb_gp_adv_time &&
		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
			rcu_advance_cbs_nowake(rdp->mynode, rdp);
			rdp->nocb_gp_adv_time = j;
		}
		rcu_nocb_unlock_irqrestore(rdp, flags);
		return true; // Callback already enqueued.
	}

	// We need to use the bypass.
	rcu_nocb_wait_contended(rdp);
	rcu_nocb_bypass_lock(rdp);
	ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
	rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
	rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
	if (!ncbs) {
		WRITE_ONCE(rdp->nocb_bypass_first, j);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
	}
	rcu_nocb_bypass_unlock(rdp);
	smp_mb(); /* Order enqueue before wake. */
	if (ncbs) {
		local_irq_restore(flags);
	} else {
		// No-CBs GP kthread might be indefinitely asleep, if so, wake.
		rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
		if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("FirstBQwake"));
			__call_rcu_nocb_wake(rdp, true, flags);
		} else {
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("FirstBQnoWake"));
			rcu_nocb_unlock_irqrestore(rdp, flags);
		}
	}
	return true; // Callback already enqueued.
}
/*
 * Awaken the no-CBs grace-period kthread if needed, either due to it
 * legitimately being asleep or due to overload conditions.
 *
 * If warranted, also wake up the kthread servicing this CPU's queues.
 */
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
				 unsigned long flags)
				 __releases(rdp->nocb_lock)
{
	unsigned long cur_gp_seq;
	unsigned long j;
	long len;
	struct task_struct *t;

	// If we are being polled or there is no kthread, just leave.
	t = READ_ONCE(rdp->nocb_gp_kthread);
	if (rcu_nocb_poll || !t) {
		rcu_nocb_unlock_irqrestore(rdp, flags);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
				    TPS("WakeNotPoll"));
		return;
	}
	// Need to actually do a wakeup.
	len = rcu_segcblist_n_cbs(&rdp->cblist);
	if (was_alldone) {
		rdp->qlen_last_fqs_check = len;
		if (!irqs_disabled_flags(flags)) {
			/* ... if queue was empty ... */
			rcu_nocb_unlock_irqrestore(rdp, flags);
			wake_nocb_gp(rdp, false);
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("WakeEmpty"));
		} else {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
					   TPS("WakeEmptyIsDeferred"));
		}
	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
		/* ... or if many callbacks queued. */
		rdp->qlen_last_fqs_check = len;
		j = jiffies;
		if (j != rdp->nocb_gp_adv_time &&
		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
			rcu_advance_cbs_nowake(rdp->mynode, rdp);
			rdp->nocb_gp_adv_time = j;
		}
		smp_mb(); /* Enqueue before timer_pending(). */
		if ((rdp->nocb_cb_sleep ||
		     !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
		    !timer_pending(&rdp->nocb_timer)) {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
					   TPS("WakeOvfIsDeferred"));
		} else {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
		}
	} else {
		rcu_nocb_unlock_irqrestore(rdp, flags);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
	}
}
/*
 * Check if we ignore this rdp.
 *
 * We check that without holding the nocb lock but
 * we make sure not to miss a freshly offloaded rdp
 * with the current ordering:
 *
 *  rdp_offload_toggle()        nocb_gp_enabled_cb()
 * -------------------------   ----------------------------
 *    WRITE flags                 LOCK nocb_gp_lock
 *    LOCK nocb_gp_lock           READ/WRITE nocb_gp_sleep
 *    READ/WRITE nocb_gp_sleep    UNLOCK nocb_gp_lock
 *    UNLOCK nocb_gp_lock         READ flags
 */
static inline bool nocb_gp_enabled_cb(struct rcu_data *rdp)
{
	u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_GP;

	return rcu_segcblist_test_flags(&rdp->cblist, flags);
}
static inline bool nocb_gp_update_state_deoffloading(struct rcu_data *rdp,
						     bool *needwake_state)
{
	struct rcu_segcblist *cblist = &rdp->cblist;

	if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
		if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
			rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
			if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
				*needwake_state = true;
		}
		return false;
	}

	/*
	 * De-offloading. Clear our flag and notify the de-offload worker.
	 * We will ignore this rdp until it gets re-offloaded.
	 */
	WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
	rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
	if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
		*needwake_state = true;
	return true;
}
/*
 * No-CBs GP kthreads come here to wait for additional callbacks to show up
 * or for grace periods to end.
 */
static void nocb_gp_wait(struct rcu_data *my_rdp)
{
	bool bypass = false;
	long bypass_ncbs;
	int __maybe_unused cpu = my_rdp->cpu;
	unsigned long cur_gp_seq;
	unsigned long flags;
	bool gotcbs = false;
	unsigned long j = jiffies;
	bool needwait_gp = false; // This prevents actual uninitialized use.
	bool needwake;
	bool needwake_gp;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
	bool wasempty = false;

	/*
	 * Each pass through the following loop checks for CBs and for the
	 * nearest grace period (if any) to wait for next.  The CB kthreads
	 * and the global grace-period kthread are awakened if needed.
	 */
	WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
		bool needwake_state = false;

		if (!nocb_gp_enabled_cb(rdp))
			continue;
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
		rcu_nocb_lock_irqsave(rdp, flags);
		if (nocb_gp_update_state_deoffloading(rdp, &needwake_state)) {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			if (needwake_state)
				swake_up_one(&rdp->nocb_state_wq);
			continue;
		}
		bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
		if (bypass_ncbs &&
		    (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
		     bypass_ncbs > 2 * qhimark)) {
			// Bypass full or old, so flush it.
			(void)rcu_nocb_try_flush_bypass(rdp, j);
			bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
		} else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			if (needwake_state)
				swake_up_one(&rdp->nocb_state_wq);
			continue; /* No callbacks here, try next. */
		}
		if (bypass_ncbs) {
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("Bypass"));
			bypass = true;
		}
		rnp = rdp->mynode;

		// Advance callbacks if helpful and low contention.
		needwake_gp = false;
		if (!rcu_segcblist_restempty(&rdp->cblist,
					     RCU_NEXT_READY_TAIL) ||
		    (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
		     rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
			raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
			needwake_gp = rcu_advance_cbs(rnp, rdp);
			wasempty = rcu_segcblist_restempty(&rdp->cblist,
							   RCU_NEXT_READY_TAIL);
			raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
		}
		// Need to wait on some grace period?
		WARN_ON_ONCE(wasempty &&
			     !rcu_segcblist_restempty(&rdp->cblist,
						      RCU_NEXT_READY_TAIL));
		if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
			if (!needwait_gp ||
			    ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
				wait_gp_seq = cur_gp_seq;
			needwait_gp = true;
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("NeedWaitGP"));
		}
		if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
			needwake = rdp->nocb_cb_sleep;
			WRITE_ONCE(rdp->nocb_cb_sleep, false);
			smp_mb(); /* CB invocation -after- GP end. */
		} else {
			needwake = false;
		}
		rcu_nocb_unlock_irqrestore(rdp, flags);
		if (needwake) {
			swake_up_one(&rdp->nocb_cb_wq);
			gotcbs = true;
		}
		if (needwake_gp)
			rcu_gp_kthread_wake();
		if (needwake_state)
			swake_up_one(&rdp->nocb_state_wq);
	}

	my_rdp->nocb_gp_bypass = bypass;
	my_rdp->nocb_gp_gp = needwait_gp;
	my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;

	if (bypass && !rcu_nocb_poll) {
		// At least one child with non-empty ->nocb_bypass, so set
		// timer in order to avoid stranding its callbacks.
		wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
				   TPS("WakeBypassIsDeferred"));
	}
	if (rcu_nocb_poll) {
		/* Polling, so trace if first poll in the series. */
		if (gotcbs)
			trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
		schedule_timeout_idle(1);
	} else if (!needwait_gp) {
		/* Wait for callbacks to appear. */
		trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
		swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
				!READ_ONCE(my_rdp->nocb_gp_sleep));
		trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
	} else {
		rnp = my_rdp->mynode;
		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
		swait_event_interruptible_exclusive(
			rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
			rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
			!READ_ONCE(my_rdp->nocb_gp_sleep));
		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
	}
	if (!rcu_nocb_poll) {
		raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
		if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
			WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
			del_timer(&my_rdp->nocb_timer);
		}
		WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
		raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
	}
	my_rdp->nocb_gp_seq = -1;
	WARN_ON(signal_pending(current));
}
/*
 * No-CBs grace-period-wait kthread.  There is one of these per group
 * of CPUs, but only once at least one CPU in that group has come online
 * at least once since boot.  This kthread checks for newly posted
 * callbacks from any of the CPUs it is responsible for, waits for a
 * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
 * that then have callback-invocation work to do.
 */
static int rcu_nocb_gp_kthread(void *arg)
{
	struct rcu_data *rdp = arg;

	for (;;) {
		WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
		nocb_gp_wait(rdp);
		cond_resched_tasks_rcu_qs();
	}
	return 0;
}
static inline bool nocb_cb_can_run(struct rcu_data *rdp)
{
	u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB;

	return rcu_segcblist_test_flags(&rdp->cblist, flags);
}
static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
{
	return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep);
}
/*
 * Invoke any ready callbacks from the corresponding no-CBs CPU,
 * then, if there are no more, wait for more to appear.
 */
static void nocb_cb_wait(struct rcu_data *rdp)
{
	struct rcu_segcblist *cblist = &rdp->cblist;
	unsigned long cur_gp_seq;
	unsigned long flags;
	bool needwake_state = false;
	bool needwake_gp = false;
	bool can_sleep = true;
	struct rcu_node *rnp = rdp->mynode;

	local_irq_save(flags);
	rcu_momentary_dyntick_idle();
	local_irq_restore(flags);
	/*
	 * Disable BH to provide the expected environment.  Also, when
	 * transitioning to/from NOCB mode, a self-requeuing callback might
	 * be invoked from softirq.  A short grace period could cause both
	 * instances of this callback to execute concurrently.
	 */
	local_bh_disable();
	rcu_do_batch(rdp);
	local_bh_enable();

	lockdep_assert_irqs_enabled();
	rcu_nocb_lock_irqsave(rdp, flags);
	if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) &&
	    rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
	    raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
		needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
	}

	if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
		if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
			rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
			if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
				needwake_state = true;
		}
		if (rcu_segcblist_ready_cbs(cblist))
			can_sleep = false;
	} else {
		/*
		 * De-offloading. Clear our flag and notify the de-offload worker.
		 * We won't touch the callbacks and will keep sleeping until we
		 * get re-offloaded.
		 */
		WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
		rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
		if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
			needwake_state = true;
	}

	WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep);

	if (rdp->nocb_cb_sleep)
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));

	rcu_nocb_unlock_irqrestore(rdp, flags);
	if (needwake_gp)
		rcu_gp_kthread_wake();

	if (needwake_state)
		swake_up_one(&rdp->nocb_state_wq);

	do {
		swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
						    nocb_cb_wait_cond(rdp));

		// VVV Ensure CB invocation follows _sleep test.
		if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^
			WARN_ON(signal_pending(current));
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
		}
	} while (!nocb_cb_can_run(rdp));
}
/*
 * Per-rcu_data kthread, but only for no-CBs CPUs.  Repeatedly invoke
 * nocb_cb_wait() to do the dirty work.
 */
static int rcu_nocb_cb_kthread(void *arg)
{
	struct rcu_data *rdp = arg;

	// Each pass through this loop does one callback batch, and,
	// if there are no more ready callbacks, waits for them.
	for (;;) {
		nocb_cb_wait(rdp);
		cond_resched_tasks_rcu_qs();
	}
	return 0;
}
/* Is a deferred wakeup of rcu_nocb_kthread() required? */
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
{
	return READ_ONCE(rdp->nocb_defer_wakeup) >= level;
}
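
/*
 * This assumes that the deferred-wakeup levels are ordered
 * RCU_NOCB_WAKE_NOT < RCU_NOCB_WAKE_BYPASS < RCU_NOCB_WAKE <
 * RCU_NOCB_WAKE_FORCE, which is also what the "<" and ">" comparisons
 * in wake_nocb_gp_defer() and __wake_nocb_gp() rely on.
 */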
/* Do a deferred wakeup of rcu_nocb_kthread(). */
static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp,
					   struct rcu_data *rdp, int level,
					   unsigned long flags)
	__releases(rdp_gp->nocb_gp_lock)
{
	int ndw;
	int ret;

	if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) {
		raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
		return false;
	}

	ndw = rdp_gp->nocb_defer_wakeup;
	ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));

	return ret;
}
/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
{
	unsigned long flags;
	struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);

	WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp);
	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));

	raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags);
	smp_mb__after_spinlock(); /* Timer expire before wakeup. */
	do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags);
}
/*
 * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
 * This means we do an inexact common-case check.  Note that if
 * we miss, ->nocb_timer will eventually clean things up.
 */
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
		return false;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
}
void rcu_nocb_flush_deferred_wakeup(void)
{
	do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
}
EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
static int rdp_offload_toggle(struct rcu_data *rdp,
			      bool offload, unsigned long flags)
	__releases(rdp->nocb_lock)
{
	struct rcu_segcblist *cblist = &rdp->cblist;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
	bool wake_gp = false;

	rcu_segcblist_offload(cblist, offload);

	if (rdp->nocb_cb_sleep)
		rdp->nocb_cb_sleep = false;
	rcu_nocb_unlock_irqrestore(rdp, flags);

	/*
	 * Ignore former value of nocb_cb_sleep and force wake up as it could
	 * have been spuriously set to false already.
	 */
	swake_up_one(&rdp->nocb_cb_wq);

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	if (rdp_gp->nocb_gp_sleep) {
		rdp_gp->nocb_gp_sleep = false;
		wake_gp = true;
	}
	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);

	if (wake_gp)
		wake_up_process(rdp_gp->nocb_gp_kthread);

	return 0;
}
static long rcu_nocb_rdp_deoffload(void *arg)
{
	struct rcu_data *rdp = arg;
	struct rcu_segcblist *cblist = &rdp->cblist;
	unsigned long flags;
	int ret;

	WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());

	pr_info("De-offloading %d\n", rdp->cpu);

	rcu_nocb_lock_irqsave(rdp, flags);
	/*
	 * Flush once and for all now. This suffices because we are
	 * running on the target CPU holding ->nocb_lock (thus having
	 * interrupts disabled), and because rdp_offload_toggle()
	 * invokes rcu_segcblist_offload(), which clears SEGCBLIST_OFFLOADED.
	 * Thus future calls to rcu_segcblist_completely_offloaded() will
	 * return false, which means that future calls to rcu_nocb_try_bypass()
	 * will refuse to put anything into the bypass.
	 */
	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
	ret = rdp_offload_toggle(rdp, false, flags);
	swait_event_exclusive(rdp->nocb_state_wq,
			      !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
							SEGCBLIST_KTHREAD_GP));
	/*
	 * Lock one last time to acquire latest callback updates from kthreads
	 * so we can later handle callbacks locally without locking.
	 */
	rcu_nocb_lock_irqsave(rdp, flags);
	/*
	 * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY after the nocb
	 * lock is released but how about being paranoid for once?
	 */
	rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY);
	/*
	 * With SEGCBLIST_SOFTIRQ_ONLY, we can't use
	 * rcu_nocb_unlock_irqrestore() anymore.
	 */
	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);

	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));

	return ret;
}
int rcu_nocb_cpu_deoffload(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int ret = 0;

	mutex_lock(&rcu_state.barrier_mutex);
	cpus_read_lock();
	if (rcu_rdp_is_offloaded(rdp)) {
		if (cpu_online(cpu)) {
			ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
			if (!ret)
				cpumask_clear_cpu(cpu, rcu_nocb_mask);
		} else {
			pr_info("NOCB: Can't CB-deoffload an offline CPU\n");
			ret = -EINVAL;
		}
	}
	cpus_read_unlock();
	mutex_unlock(&rcu_state.barrier_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);
static long rcu_nocb_rdp_offload(void *arg)
{
	struct rcu_data *rdp = arg;
	struct rcu_segcblist *cblist = &rdp->cblist;
	unsigned long flags;
	int ret;

	WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
	/*
	 * For now we only support re-offload, i.e. the rdp must have been
	 * offloaded at boot first.
	 */
	if (!rdp->nocb_gp_rdp)
		return -EINVAL;

	pr_info("Offloading %d\n", rdp->cpu);
	/*
	 * Can't use rcu_nocb_lock_irqsave() while we are in
	 * SEGCBLIST_SOFTIRQ_ONLY mode.
	 */
	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);

	/*
	 * We didn't take the nocb lock while working on the
	 * rdp->cblist in SEGCBLIST_SOFTIRQ_ONLY mode.
	 * Every modification that was previously done on
	 * rdp->cblist must be visible remotely to the nocb kthreads
	 * upon wake up after reading the cblist flags.
	 *
	 * The layout against nocb_lock enforces that ordering:
	 *
	 *  __rcu_nocb_rdp_offload()   nocb_cb_wait()/nocb_gp_wait()
	 * -------------------------   ----------------------------
	 *      WRITE callbacks           rcu_nocb_lock()
	 *      rcu_nocb_lock()           READ flags
	 *      WRITE flags               READ callbacks
	 *      rcu_nocb_unlock()         rcu_nocb_unlock()
	 */
	ret = rdp_offload_toggle(rdp, true, flags);
	swait_event_exclusive(rdp->nocb_state_wq,
			      rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
			      rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));

	return ret;
}
int rcu_nocb_cpu_offload(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int ret = 0;

	mutex_lock(&rcu_state.barrier_mutex);
	cpus_read_lock();
	if (!rcu_rdp_is_offloaded(rdp)) {
		if (cpu_online(cpu)) {
			ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
			if (!ret)
				cpumask_set_cpu(cpu, rcu_nocb_mask);
		} else {
			pr_info("NOCB: Can't CB-offload an offline CPU\n");
			ret = -EINVAL;
		}
	}
	cpus_read_unlock();
	mutex_unlock(&rcu_state.barrier_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
void __init rcu_init_nohz(void)
{
	int cpu;
	bool need_rcu_nocb_mask = false;
	struct rcu_data *rdp;

#if defined(CONFIG_NO_HZ_FULL)
	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
		need_rcu_nocb_mask = true;
#endif /* #if defined(CONFIG_NO_HZ_FULL) */

	if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
		if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
			pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
			return;
		}
	}
	if (!cpumask_available(rcu_nocb_mask))
		return;

#if defined(CONFIG_NO_HZ_FULL)
	if (tick_nohz_full_running)
		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
#endif /* #if defined(CONFIG_NO_HZ_FULL) */

	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
		pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
			    rcu_nocb_mask);
	}
	if (cpumask_empty(rcu_nocb_mask))
		pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
	else
		pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
			cpumask_pr_args(rcu_nocb_mask));
	if (rcu_nocb_poll)
		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");

	for_each_cpu(cpu, rcu_nocb_mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (rcu_segcblist_empty(&rdp->cblist))
			rcu_segcblist_init(&rdp->cblist);
		rcu_segcblist_offload(&rdp->cblist, true);
		rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
		rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
	}
	rcu_organize_nocb_kthreads();
}
/* Initialize per-rcu_data variables for no-CBs CPUs. */
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
	init_swait_queue_head(&rdp->nocb_cb_wq);
	init_swait_queue_head(&rdp->nocb_gp_wq);
	init_swait_queue_head(&rdp->nocb_state_wq);
	raw_spin_lock_init(&rdp->nocb_lock);
	raw_spin_lock_init(&rdp->nocb_bypass_lock);
	raw_spin_lock_init(&rdp->nocb_gp_lock);
	timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
	rcu_cblist_init(&rdp->nocb_bypass);
}
/*
 * If the specified CPU is a no-CBs CPU that does not already have its
 * rcuo CB kthread, spawn it.  Additionally, if the rcuo GP kthread
 * for this CPU's group has not yet been created, spawn it as well.
 */
static void rcu_spawn_one_nocb_kthread(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_data *rdp_gp;
	struct task_struct *t;

	/*
	 * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
	 * then nothing to do.
	 */
	if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread)
		return;

	/* If we didn't spawn the GP kthread first, reorganize! */
	rdp_gp = rdp->nocb_gp_rdp;
	if (!rdp_gp->nocb_gp_kthread) {
		t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
				"rcuog/%d", rdp_gp->cpu);
		if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__))
			return;
		WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
	}

	/* Spawn the kthread for this CPU. */
	t = kthread_run(rcu_nocb_cb_kthread, rdp,
			"rcuo%c/%d", rcu_state.abbr, cpu);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
		return;
	WRITE_ONCE(rdp->nocb_cb_kthread, t);
	WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
}
/*
 * If the specified CPU is a no-CBs CPU that does not already have its
 * rcuo kthread, spawn it.
 */
static void rcu_spawn_cpu_nocb_kthread(int cpu)
{
	if (rcu_scheduler_fully_active)
		rcu_spawn_one_nocb_kthread(cpu);
}
/*
 * Once the scheduler is running, spawn rcuo kthreads for all online
 * no-CBs CPUs.  This assumes that the early_initcall()s happen before
 * non-boot CPUs come online -- if this changes, we will need to add
 * some mutual exclusion.
 */
static void __init rcu_spawn_nocb_kthreads(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		rcu_spawn_cpu_nocb_kthread(cpu);
}
/* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
static int rcu_nocb_gp_stride = -1;
module_param(rcu_nocb_gp_stride, int, 0444);
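
/*
 * With the default of -1, rcu_organize_nocb_kthreads() below computes the
 * stride as nr_cpu_ids / int_sqrt(nr_cpu_ids); for example, a 64-CPU
 * system gets 64 / 8 = 8, that is, one rcuog kthread per 8 no-CBs CPUs.
 */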
/*
 * Initialize GP-CB relationships for all no-CBs CPUs.
 */
static void __init rcu_organize_nocb_kthreads(void)
{
	int cpu;
	bool firsttime = true;
	bool gotnocbs = false;
	bool gotnocbscbs = true;
	int ls = rcu_nocb_gp_stride;
	int nl = 0;  /* Next GP kthread. */
	struct rcu_data *rdp;
	struct rcu_data *rdp_gp = NULL;  /* Suppress misguided gcc warn. */
	struct rcu_data *rdp_prev = NULL;

	if (!cpumask_available(rcu_nocb_mask))
		return;
	if (ls == -1) {
		ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
		rcu_nocb_gp_stride = ls;
	}

	/*
	 * Each pass through this loop sets up one rcu_data structure.
	 * Should the corresponding CPU come online in the future, then
	 * we will spawn the needed set of rcu_nocb_kthread() kthreads.
	 */
	for_each_cpu(cpu, rcu_nocb_mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (rdp->cpu >= nl) {
			/* New GP kthread, set up for CBs & next GP. */
			gotnocbs = true;
			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
			rdp->nocb_gp_rdp = rdp;
			rdp_gp = rdp;
			if (dump_tree) {
				if (!firsttime)
					pr_cont("%s\n", gotnocbscbs
						? "" : " (self only)");
				gotnocbscbs = false;
				firsttime = false;
				pr_alert("%s: No-CB GP kthread CPU %d:",
					 __func__, cpu);
			}
		} else {
			/* Another CB kthread, link to previous GP kthread. */
			gotnocbscbs = true;
			rdp->nocb_gp_rdp = rdp_gp;
			rdp_prev->nocb_next_cb_rdp = rdp;
			if (dump_tree)
				pr_cont(" %d", cpu);
		}
		rdp_prev = rdp;
	}
	if (gotnocbs && dump_tree)
		pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
}
/*
 * Bind the current task to the offloaded CPUs.  If there are no offloaded
 * CPUs, leave the task unbound.  Splat if the bind attempt fails.
 */
void rcu_bind_current_to_nocb(void)
{
	if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
		WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
}
EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
// The ->on_cpu field is available only in CONFIG_SMP=y, so...
#ifdef CONFIG_SMP
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
{
	return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
}
#else // #ifdef CONFIG_SMP
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
{
	return "";
}
#endif // #else #ifdef CONFIG_SMP
/*
 * Dump out nocb grace-period kthread state for the specified rcu_data
 * structure.
 */
static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
{
	struct rcu_node *rnp = rdp->mynode;

	pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
		rdp->cpu,
		"kK"[!!rdp->nocb_gp_kthread],
		"lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
		"dD"[!!rdp->nocb_defer_wakeup],
		"tT"[timer_pending(&rdp->nocb_timer)],
		"sS"[!!rdp->nocb_gp_sleep],
		".W"[swait_active(&rdp->nocb_gp_wq)],
		".W"[swait_active(&rnp->nocb_gp_wq[0])],
		".W"[swait_active(&rnp->nocb_gp_wq[1])],
		".B"[!!rdp->nocb_gp_bypass],
		".G"[!!rdp->nocb_gp_gp],
		(long)rdp->nocb_gp_seq,
		rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops),
		rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.',
		rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
		show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
}
/* Dump out nocb kthread state for the specified rcu_data structure. */
static void show_rcu_nocb_state(struct rcu_data *rdp)
{
	char bufw[20];
	char bufr[20];
	struct rcu_segcblist *rsclp = &rdp->cblist;
	bool waslocked;
	bool wassleep;

	if (rdp->nocb_gp_rdp == rdp)
		show_rcu_nocb_gp_state(rdp);

	sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
	sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
	pr_info("   CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
		rdp->cpu, rdp->nocb_gp_rdp->cpu,
		rdp->nocb_next_cb_rdp ? rdp->nocb_next_cb_rdp->cpu : -1,
		"kK"[!!rdp->nocb_cb_kthread],
		"bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
		"cC"[!!atomic_read(&rdp->nocb_lock_contended)],
		"lL"[raw_spin_is_locked(&rdp->nocb_lock)],
		"sS"[!!rdp->nocb_cb_sleep],
		".W"[swait_active(&rdp->nocb_cb_wq)],
		jiffies - rdp->nocb_bypass_first,
		jiffies - rdp->nocb_nobypass_last,
		rdp->nocb_nobypass_count,
		".D"[rcu_segcblist_ready_cbs(rsclp)],
		".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)],
		rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw,
		".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)],
		rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr,
		".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)],
		".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
		rcu_segcblist_n_cbs(&rdp->cblist),
		rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.',
		rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
		show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));

	/* It is OK for GP kthreads to have GP state. */
	if (rdp->nocb_gp_rdp == rdp)
		return;

	waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
	wassleep = swait_active(&rdp->nocb_gp_wq);
	if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
		return;  /* Nothing untoward. */

	pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
		"lL"[waslocked],
		"dD"[!!rdp->nocb_defer_wakeup],
		"sS"[!!rdp->nocb_gp_sleep],
		".W"[wassleep]);
}
#else /* #ifdef CONFIG_RCU_NOCB_CPU */

static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
{
	return 0;
}

static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
{
	return false;
}

/* No ->nocb_lock to acquire. */
static void rcu_nocb_lock(struct rcu_data *rdp)
{
}

/* No ->nocb_lock to release. */
static void rcu_nocb_unlock(struct rcu_data *rdp)
{
}

/* No ->nocb_lock to release. */
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags)
{
	local_irq_restore(flags);
}

/* Lockdep check that ->cblist may be safely accessed. */
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
}

static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
}

static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
	return NULL;
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
}

static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j)
{
	return true;
}

static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				bool *was_alldone, unsigned long flags)
{
	return false;
}

static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
				 unsigned long flags)
{
	WARN_ON_ONCE(1);  /* Should be dead code! */
}

static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
}

static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
{
	return false;
}

static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
	return false;
}

static void rcu_spawn_cpu_nocb_kthread(int cpu)
{
}

static void __init rcu_spawn_nocb_kthreads(void)
{
}

static void show_rcu_nocb_state(struct rcu_data *rdp)
{
}

#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */