kernel/rcu/tree_nocb.h
1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /*
3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4 * Internal non-public definitions that provide either classic
5 * or preemptible semantics.
6 *
7 * Copyright Red Hat, 2009
8 * Copyright IBM Corporation, 2009
9 * Copyright SUSE, 2021
10 *
11 * Author: Ingo Molnar <mingo@elte.hu>
12 * Paul E. McKenney <paulmck@linux.ibm.com>
13 * Frederic Weisbecker <frederic@kernel.org>
14 */
15
16 #ifdef CONFIG_RCU_NOCB_CPU
17 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
18 static bool __read_mostly rcu_nocb_poll; /* Offload kthreads are to poll. */
19 static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
20 {
21 return lockdep_is_held(&rdp->nocb_lock);
22 }
23
24 static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
25 {
26 /* Race on early boot between thread creation and assignment */
27 if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread)
28 return true;
29
30 if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread)
31 if (in_task())
32 return true;
33 return false;
34 }
35
36 /*
37 * Offload callback processing from the set of CPUs specified at boot
38 * time by rcu_nocb_mask. For each CPU in the set, kthreads are created
39 * that pull the callbacks from the corresponding CPU, wait for
40 * a grace period to elapse, and invoke the callbacks. These kthreads
41 * are organized into GP kthreads, which manage incoming callbacks, wait for
42 * grace periods, and awaken CB kthreads, and the CB kthreads, which only
43 * invoke callbacks. Each GP kthread invokes its own CBs. The no-CBs CPUs
44 * do a wake_up() on their GP kthread when they insert a callback into any
45 * empty list, unless the rcu_nocb_poll boot parameter has been specified,
46 * in which case each kthread actively polls its CPU. (Which isn't so great
47 * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
48 *
49 * This is intended to be used in conjunction with Frederic Weisbecker's
50 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
51 * running CPU-bound user-mode computations.
52 *
53 * Offloading of callbacks can also be used as an energy-efficiency
54 * measure because CPUs with no RCU callbacks queued are more aggressive
55 * about entering dyntick-idle mode.
56 */
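/*
 * Rough picture (illustrative) of the resulting kthread organization for
 * one group, with names as used by the kthread_run() calls below and <c>
 * standing for rcu_state.abbr:
 *
 *	rcuog/N			GP kthread, runs rcu_nocb_gp_kthread()
 *	  |-> rcuo<c>/N		CB kthread for CPU N, runs rcu_nocb_cb_kthread()
 *	  |-> rcuo<c>/N+1	CB kthread for CPU N+1
 *	  |-> ...
 *
 * Here CPU N is the first CPU of the group; see rcu_organize_nocb_kthreads().
 */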
57
58
59 /*
60 * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
61 * If the list is invalid, a warning is emitted and all CPUs are offloaded.
62 */
63 static int __init rcu_nocb_setup(char *str)
64 {
65 alloc_bootmem_cpumask_var(&rcu_nocb_mask);
66 if (cpulist_parse(str, rcu_nocb_mask)) {
67 pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
68 cpumask_setall(rcu_nocb_mask);
69 }
70 return 1;
71 }
72 __setup("rcu_nocbs=", rcu_nocb_setup);
73
74 static int __init parse_rcu_nocb_poll(char *arg)
75 {
76 rcu_nocb_poll = true;
77 return 0;
78 }
79 early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
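/*
 * Example (illustrative only): booting with
 *
 *	rcu_nocbs=1-7 rcu_nocb_poll
 *
 * offloads callback invocation for CPUs 1-7 to the rcuo/rcuog kthreads and
 * makes those kthreads poll rather than wait to be awakened, reducing RCU's
 * overhead on the offloaded CPUs at some cost in energy efficiency.
 */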
80
81 /*
82 * Don't bother bypassing ->cblist if the call_rcu() rate is low.
83 * After all, the main point of bypassing is to avoid lock contention
84 * on ->nocb_lock, which can only happen at high call_rcu() rates.
85 */
86 static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
87 module_param(nocb_nobypass_lim_per_jiffy, int, 0);
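/*
 * For example, with HZ=1000 this default works out to 16 call_rcu()
 * invocations per jiffy, and with HZ=250 to 64, before further callbacks
 * start being diverted into ->nocb_bypass (see rcu_nocb_try_bypass()).
 */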
88
89 /*
90 * Acquire the specified rcu_data structure's ->nocb_bypass_lock. If the
91 * lock isn't immediately available, increment ->nocb_lock_contended to
92 * flag the contention.
93 */
94 static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
95 __acquires(&rdp->nocb_bypass_lock)
96 {
97 lockdep_assert_irqs_disabled();
98 if (raw_spin_trylock(&rdp->nocb_bypass_lock))
99 return;
100 atomic_inc(&rdp->nocb_lock_contended);
101 WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
102 smp_mb__after_atomic(); /* atomic_inc() before lock. */
103 raw_spin_lock(&rdp->nocb_bypass_lock);
104 smp_mb__before_atomic(); /* atomic_dec() after lock. */
105 atomic_dec(&rdp->nocb_lock_contended);
106 }
107
108 /*
109 * Spinwait until the specified rcu_data structure's ->nocb_lock is
110 * not contended. Please note that this is extremely special-purpose,
111 * relying on the fact that at most two kthreads and one CPU contend for
112 * this lock, and also that the two kthreads are guaranteed to have frequent
113 * grace-period-duration time intervals between successive acquisitions
114 * of the lock. This allows us to use an extremely simple throttling
115 * mechanism, and further to apply it only to the CPU doing floods of
116 * call_rcu() invocations. Don't try this at home!
117 */
118 static void rcu_nocb_wait_contended(struct rcu_data *rdp)
119 {
120 WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
121 while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
122 cpu_relax();
123 }
124
125 /*
126 * Conditionally acquire the specified rcu_data structure's
127 * ->nocb_bypass_lock.
128 */
129 static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
130 {
131 lockdep_assert_irqs_disabled();
132 return raw_spin_trylock(&rdp->nocb_bypass_lock);
133 }
134
135 /*
136 * Release the specified rcu_data structure's ->nocb_bypass_lock.
137 */
138 static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
139 __releases(&rdp->nocb_bypass_lock)
140 {
141 lockdep_assert_irqs_disabled();
142 raw_spin_unlock(&rdp->nocb_bypass_lock);
143 }
144
145 /*
146 * Acquire the specified rcu_data structure's ->nocb_lock, but only
147 * if it corresponds to a no-CBs CPU.
148 */
149 static void rcu_nocb_lock(struct rcu_data *rdp)
150 {
151 lockdep_assert_irqs_disabled();
152 if (!rcu_rdp_is_offloaded(rdp))
153 return;
154 raw_spin_lock(&rdp->nocb_lock);
155 }
156
157 /*
158 * Release the specified rcu_data structure's ->nocb_lock, but only
159 * if it corresponds to a no-CBs CPU.
160 */
161 static void rcu_nocb_unlock(struct rcu_data *rdp)
162 {
163 if (rcu_rdp_is_offloaded(rdp)) {
164 lockdep_assert_irqs_disabled();
165 raw_spin_unlock(&rdp->nocb_lock);
166 }
167 }
168
169 /*
170 * Release the specified rcu_data structure's ->nocb_lock and restore
171 * interrupts, but only if it corresponds to a no-CBs CPU.
172 */
173 static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
174 unsigned long flags)
175 {
176 if (rcu_rdp_is_offloaded(rdp)) {
177 lockdep_assert_irqs_disabled();
178 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
179 } else {
180 local_irq_restore(flags);
181 }
182 }
183
184 /* Lockdep check that ->cblist may be safely accessed. */
185 static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
186 {
187 lockdep_assert_irqs_disabled();
188 if (rcu_rdp_is_offloaded(rdp))
189 lockdep_assert_held(&rdp->nocb_lock);
190 }
191
192 /*
193 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
194 * grace period.
195 */
196 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
197 {
198 swake_up_all(sq);
199 }
200
201 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
202 {
203 return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
204 }
205
206 static void rcu_init_one_nocb(struct rcu_node *rnp)
207 {
208 init_swait_queue_head(&rnp->nocb_gp_wq[0]);
209 init_swait_queue_head(&rnp->nocb_gp_wq[1]);
210 }
211
212 /* Is the specified CPU a no-CBs CPU? */
213 bool rcu_is_nocb_cpu(int cpu)
214 {
215 if (cpumask_available(rcu_nocb_mask))
216 return cpumask_test_cpu(cpu, rcu_nocb_mask);
217 return false;
218 }
219
220 static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
221 struct rcu_data *rdp,
222 bool force, unsigned long flags)
223 __releases(rdp_gp->nocb_gp_lock)
224 {
225 bool needwake = false;
226
227 if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
228 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
229 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
230 TPS("AlreadyAwake"));
231 return false;
232 }
233
234 if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
235 WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
236 del_timer(&rdp_gp->nocb_timer);
237 }
238
239 if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
240 WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
241 needwake = true;
242 }
243 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
244 if (needwake) {
245 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
246 wake_up_process(rdp_gp->nocb_gp_kthread);
247 }
248
249 return needwake;
250 }
251
252 /*
253 * Kick the GP kthread for this NOCB group.
254 */
255 static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
256 {
257 unsigned long flags;
258 struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
259
260 raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
261 return __wake_nocb_gp(rdp_gp, rdp, force, flags);
262 }
263
264 /*
265 * Arrange to wake the GP kthread for this NOCB group at some future
266 * time when it is safe to do so.
267 */
268 static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
269 const char *reason)
270 {
271 unsigned long flags;
272 struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
273
274 raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
275
276 /*
277 * Bypass wakeup overrides previous deferments. In case
278 * of callback storm, no need to wake up too early.
279 */
280 if (waketype == RCU_NOCB_WAKE_BYPASS) {
281 mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
282 WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
283 } else {
284 if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
285 mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
286 if (rdp_gp->nocb_defer_wakeup < waketype)
287 WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
288 }
289
290 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
291
292 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
293 }
294
295 /*
296 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
297 * However, if there is a callback to be enqueued and if ->nocb_bypass
298 * proves to be initially empty, just return false because the no-CB GP
299 * kthread may need to be awakened in this case.
300 *
301 * Note that this function always returns true if rhp is NULL.
302 */
303 static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
304 unsigned long j)
305 {
306 struct rcu_cblist rcl;
307
308 WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
309 rcu_lockdep_assert_cblist_protected(rdp);
310 lockdep_assert_held(&rdp->nocb_bypass_lock);
311 if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
312 raw_spin_unlock(&rdp->nocb_bypass_lock);
313 return false;
314 }
315 /* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
316 if (rhp)
317 rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
318 rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
319 rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
320 WRITE_ONCE(rdp->nocb_bypass_first, j);
321 rcu_nocb_bypass_unlock(rdp);
322 return true;
323 }
324
325 /*
326 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
327 * However, if there is a callback to be enqueued and if ->nocb_bypass
328 * proves to be initially empty, just return false because the no-CB GP
329 * kthread may need to be awakened in this case.
330 *
331 * Note that this function always returns true if rhp is NULL.
332 */
333 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
334 unsigned long j)
335 {
336 if (!rcu_rdp_is_offloaded(rdp))
337 return true;
338 rcu_lockdep_assert_cblist_protected(rdp);
339 rcu_nocb_bypass_lock(rdp);
340 return rcu_nocb_do_flush_bypass(rdp, rhp, j);
341 }
342
343 /*
344 * If the ->nocb_bypass_lock is immediately available, flush the
345 * ->nocb_bypass queue into ->cblist.
346 */
347 static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
348 {
349 rcu_lockdep_assert_cblist_protected(rdp);
350 if (!rcu_rdp_is_offloaded(rdp) ||
351 !rcu_nocb_bypass_trylock(rdp))
352 return;
353 WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j));
354 }
355
356 /*
357 * See whether it is appropriate to use the ->nocb_bypass list in order
358 * to control contention on ->nocb_lock. A limited number of direct
359 * enqueues are permitted into ->cblist per jiffy. If ->nocb_bypass
360 * is non-empty, further callbacks must be placed into ->nocb_bypass,
361 * otherwise rcu_barrier() breaks. Use rcu_nocb_flush_bypass() to switch
362 * back to direct use of ->cblist. However, ->nocb_bypass should not be
363 * used if ->cblist is empty, because otherwise callbacks can be stranded
364 * on ->nocb_bypass, since we cannot count on the current CPU ever again
365 * invoking call_rcu(). The general rule is that if ->nocb_bypass is
366 * non-empty, the corresponding no-CBs grace-period kthread must not be
367 * in an indefinite sleep state.
368 *
369 * Finally, it is not permitted to use the bypass during early boot,
370 * as doing so would confuse the auto-initialization code. Besides
371 * which, there is no point in worrying about lock contention while
372 * there is only one CPU in operation.
373 */
374 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
375 bool *was_alldone, unsigned long flags)
376 {
377 unsigned long c;
378 unsigned long cur_gp_seq;
379 unsigned long j = jiffies;
380 long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
381
382 lockdep_assert_irqs_disabled();
383
384 // Pure softirq/rcuc based processing: no bypassing, no
385 // locking.
386 if (!rcu_rdp_is_offloaded(rdp)) {
387 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
388 return false;
389 }
390
391 // In the process of (de-)offloading: no bypassing, but
392 // locking.
393 if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
394 rcu_nocb_lock(rdp);
395 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
396 return false; /* Not offloaded, no bypassing. */
397 }
398
399 // Don't use ->nocb_bypass during early boot.
400 if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
401 rcu_nocb_lock(rdp);
402 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
403 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
404 return false;
405 }
406
407 // If we have advanced to a new jiffy, reset counts to allow
408 // moving back from ->nocb_bypass to ->cblist.
409 if (j == rdp->nocb_nobypass_last) {
410 c = rdp->nocb_nobypass_count + 1;
411 } else {
412 WRITE_ONCE(rdp->nocb_nobypass_last, j);
413 c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
414 if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
415 nocb_nobypass_lim_per_jiffy))
416 c = 0;
417 else if (c > nocb_nobypass_lim_per_jiffy)
418 c = nocb_nobypass_lim_per_jiffy;
419 }
420 WRITE_ONCE(rdp->nocb_nobypass_count, c);
421
422 // If there hasn't yet been all that many ->cblist enqueues
423 // this jiffy, tell the caller to enqueue onto ->cblist. But flush
424 // ->nocb_bypass first.
425 if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) {
426 rcu_nocb_lock(rdp);
427 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
428 if (*was_alldone)
429 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
430 TPS("FirstQ"));
431 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j));
432 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
433 return false; // Caller must enqueue the callback.
434 }
435
436 // If ->nocb_bypass has been used too long or is too full,
437 // flush ->nocb_bypass to ->cblist.
438 if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
439 ncbs >= qhimark) {
440 rcu_nocb_lock(rdp);
441 if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
442 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
443 if (*was_alldone)
444 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
445 TPS("FirstQ"));
446 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
447 return false; // Caller must enqueue the callback.
448 }
449 if (j != rdp->nocb_gp_adv_time &&
450 rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
451 rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
452 rcu_advance_cbs_nowake(rdp->mynode, rdp);
453 rdp->nocb_gp_adv_time = j;
454 }
455 rcu_nocb_unlock_irqrestore(rdp, flags);
456 return true; // Callback already enqueued.
457 }
458
459 // We need to use the bypass.
460 rcu_nocb_wait_contended(rdp);
461 rcu_nocb_bypass_lock(rdp);
462 ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
463 rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
464 rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
465 if (!ncbs) {
466 WRITE_ONCE(rdp->nocb_bypass_first, j);
467 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
468 }
469 rcu_nocb_bypass_unlock(rdp);
470 smp_mb(); /* Order enqueue before wake. */
471 if (ncbs) {
472 local_irq_restore(flags);
473 } else {
474 // No-CBs GP kthread might be indefinitely asleep, if so, wake.
475 rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
476 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
477 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
478 TPS("FirstBQwake"));
479 __call_rcu_nocb_wake(rdp, true, flags);
480 } else {
481 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
482 TPS("FirstBQnoWake"));
483 rcu_nocb_unlock_irqrestore(rdp, flags);
484 }
485 }
486 return true; // Callback already enqueued.
487 }
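/*
 * Sketch of how the call_rcu() path is expected to use the function above,
 * assuming an offloaded rdp (illustrative only; the authoritative caller
 * lives in kernel/rcu/tree.c):
 *
 *	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
 *		return;	// Already enqueued, locks and irqs restored.
 *	// Otherwise ->nocb_lock is now held: enqueue onto ->cblist and let
 *	// __call_rcu_nocb_wake() drop the lock and awaken the GP kthread
 *	// if required.
 *	rcu_segcblist_enqueue(&rdp->cblist, head);
 *	__call_rcu_nocb_wake(rdp, was_alldone, flags);
 */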
488
489 /*
490 * Awaken the no-CBs grace-period kthread if needed, either due to it
491 * legitimately being asleep or due to overload conditions.
492 *
493 * If warranted, also wake up the kthread servicing this CPU's queues.
494 */
495 static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
496 unsigned long flags)
497 __releases(rdp->nocb_lock)
498 {
499 unsigned long cur_gp_seq;
500 unsigned long j;
501 long len;
502 struct task_struct *t;
503
504 // If we are being polled or there is no kthread, just leave.
505 t = READ_ONCE(rdp->nocb_gp_kthread);
506 if (rcu_nocb_poll || !t) {
507 rcu_nocb_unlock_irqrestore(rdp, flags);
508 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
509 TPS("WakeNotPoll"));
510 return;
511 }
512 // Need to actually do a wakeup.
513 len = rcu_segcblist_n_cbs(&rdp->cblist);
514 if (was_alldone) {
515 rdp->qlen_last_fqs_check = len;
516 if (!irqs_disabled_flags(flags)) {
517 /* ... if queue was empty ... */
518 rcu_nocb_unlock_irqrestore(rdp, flags);
519 wake_nocb_gp(rdp, false);
520 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
521 TPS("WakeEmpty"));
522 } else {
523 rcu_nocb_unlock_irqrestore(rdp, flags);
524 wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
525 TPS("WakeEmptyIsDeferred"));
526 }
527 } else if (len > rdp->qlen_last_fqs_check + qhimark) {
528 /* ... or if many callbacks queued. */
529 rdp->qlen_last_fqs_check = len;
530 j = jiffies;
531 if (j != rdp->nocb_gp_adv_time &&
532 rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
533 rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
534 rcu_advance_cbs_nowake(rdp->mynode, rdp);
535 rdp->nocb_gp_adv_time = j;
536 }
537 smp_mb(); /* Enqueue before timer_pending(). */
538 if ((rdp->nocb_cb_sleep ||
539 !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
540 !timer_pending(&rdp->nocb_timer)) {
541 rcu_nocb_unlock_irqrestore(rdp, flags);
542 wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
543 TPS("WakeOvfIsDeferred"));
544 } else {
545 rcu_nocb_unlock_irqrestore(rdp, flags);
546 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
547 }
548 } else {
549 rcu_nocb_unlock_irqrestore(rdp, flags);
550 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
551 }
552 return;
553 }
554
555 /*
556 * Check whether this rdp should be ignored.
557 *
558 * We check that without holding the nocb lock, but
559 * we make sure not to miss a freshly offloaded rdp
560 * given the following ordering:
561 *
562 * rdp_offload_toggle() nocb_gp_enabled_cb()
563 * ------------------------- ----------------------------
564 * WRITE flags LOCK nocb_gp_lock
565 * LOCK nocb_gp_lock READ/WRITE nocb_gp_sleep
566 * READ/WRITE nocb_gp_sleep UNLOCK nocb_gp_lock
567 * UNLOCK nocb_gp_lock READ flags
568 */
569 static inline bool nocb_gp_enabled_cb(struct rcu_data *rdp)
570 {
571 u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_GP;
572
573 return rcu_segcblist_test_flags(&rdp->cblist, flags);
574 }
575
576 static inline bool nocb_gp_update_state_deoffloading(struct rcu_data *rdp,
577 bool *needwake_state)
578 {
579 struct rcu_segcblist *cblist = &rdp->cblist;
580
581 if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
582 if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
583 rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
584 if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
585 *needwake_state = true;
586 }
587 return false;
588 }
589
590 /*
591 * De-offloading. Clear our flag and notify the de-offload worker.
592 * We will ignore this rdp until it gets re-offloaded.
593 */
594 WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
595 rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
596 if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
597 *needwake_state = true;
598 return true;
599 }
600
601
602 /*
603 * No-CBs GP kthreads come here to wait for additional callbacks to show up
604 * or for grace periods to end.
605 */
606 static void nocb_gp_wait(struct rcu_data *my_rdp)
607 {
608 bool bypass = false;
609 long bypass_ncbs;
610 int __maybe_unused cpu = my_rdp->cpu;
611 unsigned long cur_gp_seq;
612 unsigned long flags;
613 bool gotcbs = false;
614 unsigned long j = jiffies;
615 bool needwait_gp = false; // This prevents actual uninitialized use.
616 bool needwake;
617 bool needwake_gp;
618 struct rcu_data *rdp;
619 struct rcu_node *rnp;
620 unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
621 bool wasempty = false;
622
623 /*
624 * Each pass through the following loop checks for CBs and for the
625 * nearest grace period (if any) to wait for next. The CB kthreads
626 * and the global grace-period kthread are awakened if needed.
627 */
628 WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
629 for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
630 bool needwake_state = false;
631
632 if (!nocb_gp_enabled_cb(rdp))
633 continue;
634 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
635 rcu_nocb_lock_irqsave(rdp, flags);
636 if (nocb_gp_update_state_deoffloading(rdp, &needwake_state)) {
637 rcu_nocb_unlock_irqrestore(rdp, flags);
638 if (needwake_state)
639 swake_up_one(&rdp->nocb_state_wq);
640 continue;
641 }
642 bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
643 if (bypass_ncbs &&
644 (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
645 bypass_ncbs > 2 * qhimark)) {
646 // Bypass full or old, so flush it.
647 (void)rcu_nocb_try_flush_bypass(rdp, j);
648 bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
649 } else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
650 rcu_nocb_unlock_irqrestore(rdp, flags);
651 if (needwake_state)
652 swake_up_one(&rdp->nocb_state_wq);
653 continue; /* No callbacks here, try next. */
654 }
655 if (bypass_ncbs) {
656 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
657 TPS("Bypass"));
658 bypass = true;
659 }
660 rnp = rdp->mynode;
661
662 // Advance callbacks if helpful and low contention.
663 needwake_gp = false;
664 if (!rcu_segcblist_restempty(&rdp->cblist,
665 RCU_NEXT_READY_TAIL) ||
666 (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
667 rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
668 raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
669 needwake_gp = rcu_advance_cbs(rnp, rdp);
670 wasempty = rcu_segcblist_restempty(&rdp->cblist,
671 RCU_NEXT_READY_TAIL);
672 raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
673 }
674 // Need to wait on some grace period?
675 WARN_ON_ONCE(wasempty &&
676 !rcu_segcblist_restempty(&rdp->cblist,
677 RCU_NEXT_READY_TAIL));
678 if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
679 if (!needwait_gp ||
680 ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
681 wait_gp_seq = cur_gp_seq;
682 needwait_gp = true;
683 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
684 TPS("NeedWaitGP"));
685 }
686 if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
687 needwake = rdp->nocb_cb_sleep;
688 WRITE_ONCE(rdp->nocb_cb_sleep, false);
689 smp_mb(); /* CB invocation -after- GP end. */
690 } else {
691 needwake = false;
692 }
693 rcu_nocb_unlock_irqrestore(rdp, flags);
694 if (needwake) {
695 swake_up_one(&rdp->nocb_cb_wq);
696 gotcbs = true;
697 }
698 if (needwake_gp)
699 rcu_gp_kthread_wake();
700 if (needwake_state)
701 swake_up_one(&rdp->nocb_state_wq);
702 }
703
704 my_rdp->nocb_gp_bypass = bypass;
705 my_rdp->nocb_gp_gp = needwait_gp;
706 my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;
707
708 if (bypass && !rcu_nocb_poll) {
709 // At least one child with non-empty ->nocb_bypass, so set
710 // timer in order to avoid stranding its callbacks.
711 wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
712 TPS("WakeBypassIsDeferred"));
713 }
714 if (rcu_nocb_poll) {
715 /* Polling, so trace if first poll in the series. */
716 if (gotcbs)
717 trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
718 schedule_timeout_idle(1);
719 } else if (!needwait_gp) {
720 /* Wait for callbacks to appear. */
721 trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
722 swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
723 !READ_ONCE(my_rdp->nocb_gp_sleep));
724 trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
725 } else {
726 rnp = my_rdp->mynode;
727 trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
728 swait_event_interruptible_exclusive(
729 rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
730 rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
731 !READ_ONCE(my_rdp->nocb_gp_sleep));
732 trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
733 }
734 if (!rcu_nocb_poll) {
735 raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
736 if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
737 WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
738 del_timer(&my_rdp->nocb_timer);
739 }
740 WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
741 raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
742 }
743 my_rdp->nocb_gp_seq = -1;
744 WARN_ON(signal_pending(current));
745 }
746
747 /*
748 * No-CBs grace-period-wait kthread. There is one of these per group
749 * of CPUs, but it is spawned only after at least one CPU in that group
750 * has come online since boot. This kthread checks for newly posted
751 * callbacks from any of the CPUs it is responsible for, waits for a
752 * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
753 * that then have callback-invocation work to do.
754 */
755 static int rcu_nocb_gp_kthread(void *arg)
756 {
757 struct rcu_data *rdp = arg;
758
759 for (;;) {
760 WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
761 nocb_gp_wait(rdp);
762 cond_resched_tasks_rcu_qs();
763 }
764 return 0;
765 }
766
767 static inline bool nocb_cb_can_run(struct rcu_data *rdp)
768 {
769 u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB;
770 return rcu_segcblist_test_flags(&rdp->cblist, flags);
771 }
772
773 static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
774 {
775 return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep);
776 }
777
778 /*
779 * Invoke any ready callbacks from the corresponding no-CBs CPU,
780 * then, if there are no more, wait for more to appear.
781 */
782 static void nocb_cb_wait(struct rcu_data *rdp)
783 {
784 struct rcu_segcblist *cblist = &rdp->cblist;
785 unsigned long cur_gp_seq;
786 unsigned long flags;
787 bool needwake_state = false;
788 bool needwake_gp = false;
789 bool can_sleep = true;
790 struct rcu_node *rnp = rdp->mynode;
791
792 local_irq_save(flags);
793 rcu_momentary_dyntick_idle();
794 local_irq_restore(flags);
795 /*
796 * Disable BH to provide the expected environment. Also, when
797 * transitioning to/from NOCB mode, a self-requeuing callback might
798 * be invoked from softirq. A short grace period could cause both
799 * instances of this callback to execute concurrently.
800 */
801 local_bh_disable();
802 rcu_do_batch(rdp);
803 local_bh_enable();
804 lockdep_assert_irqs_enabled();
805 rcu_nocb_lock_irqsave(rdp, flags);
806 if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) &&
807 rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
808 raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
809 needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
810 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
811 }
812
813 if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
814 if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
815 rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
816 if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
817 needwake_state = true;
818 }
819 if (rcu_segcblist_ready_cbs(cblist))
820 can_sleep = false;
821 } else {
822 /*
823 * De-offloading. Clear our flag and notify the de-offload worker.
824 * We won't touch the callbacks and will keep sleeping until we
825 * get re-offloaded.
826 */
827 WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
828 rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
829 if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
830 needwake_state = true;
831 }
832
833 WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep);
834
835 if (rdp->nocb_cb_sleep)
836 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
837
838 rcu_nocb_unlock_irqrestore(rdp, flags);
839 if (needwake_gp)
840 rcu_gp_kthread_wake();
841
842 if (needwake_state)
843 swake_up_one(&rdp->nocb_state_wq);
844
845 do {
846 swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
847 nocb_cb_wait_cond(rdp));
848
849 // VVV Ensure CB invocation follows _sleep test.
850 if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^
851 WARN_ON(signal_pending(current));
852 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
853 }
854 } while (!nocb_cb_can_run(rdp));
855 }
856
857 /*
858 * Per-rcu_data kthread, but only for no-CBs CPUs. Repeatedly invoke
859 * nocb_cb_wait() to do the dirty work.
860 */
861 static int rcu_nocb_cb_kthread(void *arg)
862 {
863 struct rcu_data *rdp = arg;
864
865 // Each pass through this loop does one callback batch, and,
866 // if there are no more ready callbacks, waits for them.
867 for (;;) {
868 nocb_cb_wait(rdp);
869 cond_resched_tasks_rcu_qs();
870 }
871 return 0;
872 }
873
874 /* Is a deferred wakeup of rcu_nocb_kthread() required? */
875 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
876 {
877 return READ_ONCE(rdp->nocb_defer_wakeup) >= level;
878 }
879
880 /* Do a deferred wakeup of rcu_nocb_kthread(). */
881 static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp,
882 struct rcu_data *rdp, int level,
883 unsigned long flags)
884 __releases(rdp_gp->nocb_gp_lock)
885 {
886 int ndw;
887 int ret;
888
889 if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) {
890 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
891 return false;
892 }
893
894 ndw = rdp_gp->nocb_defer_wakeup;
895 ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
896 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
897
898 return ret;
899 }
900
901 /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
902 static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
903 {
904 unsigned long flags;
905 struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
906
907 WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp);
908 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
909
910 raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags);
911 smp_mb__after_spinlock(); /* Timer expire before wakeup. */
912 do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags);
913 }
914
915 /*
916 * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
917 * This means we do an inexact common-case check. Note that if
918 * we miss, ->nocb_timer will eventually clean things up.
919 */
920 static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
921 {
922 unsigned long flags;
923 struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
924
925 if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
926 return false;
927
928 raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
929 return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
930 }
931
932 void rcu_nocb_flush_deferred_wakeup(void)
933 {
934 do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
935 }
936 EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
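/*
 * The exported helper above lets code outside RCU (for instance the
 * idle-entry path) flush a pending deferred wakeup of at least
 * RCU_NOCB_WAKE strength right away instead of leaving it to ->nocb_timer.
 */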
937
938 static int rdp_offload_toggle(struct rcu_data *rdp,
939 bool offload, unsigned long flags)
940 __releases(rdp->nocb_lock)
941 {
942 struct rcu_segcblist *cblist = &rdp->cblist;
943 struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
944 bool wake_gp = false;
945
946 rcu_segcblist_offload(cblist, offload);
947
948 if (rdp->nocb_cb_sleep)
949 rdp->nocb_cb_sleep = false;
950 rcu_nocb_unlock_irqrestore(rdp, flags);
951
952 /*
953 * Ignore former value of nocb_cb_sleep and force wake up as it could
954 * have been spuriously set to false already.
955 */
956 swake_up_one(&rdp->nocb_cb_wq);
957
958 raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
959 if (rdp_gp->nocb_gp_sleep) {
960 rdp_gp->nocb_gp_sleep = false;
961 wake_gp = true;
962 }
963 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
964
965 if (wake_gp)
966 wake_up_process(rdp_gp->nocb_gp_kthread);
967
968 return 0;
969 }
970
971 static long rcu_nocb_rdp_deoffload(void *arg)
972 {
973 struct rcu_data *rdp = arg;
974 struct rcu_segcblist *cblist = &rdp->cblist;
975 unsigned long flags;
976 int ret;
977
978 WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
979
980 pr_info("De-offloading %d\n", rdp->cpu);
981
982 rcu_nocb_lock_irqsave(rdp, flags);
983 /*
984 * Flush once and for all now. This suffices because we are
985 * running on the target CPU holding ->nocb_lock (thus having
986 * interrupts disabled), and because rdp_offload_toggle()
987 * invokes rcu_segcblist_offload(), which clears SEGCBLIST_OFFLOADED.
988 * Thus future calls to rcu_segcblist_completely_offloaded() will
989 * return false, which means that future calls to rcu_nocb_try_bypass()
990 * will refuse to put anything into the bypass.
991 */
992 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
993 ret = rdp_offload_toggle(rdp, false, flags);
994 swait_event_exclusive(rdp->nocb_state_wq,
995 !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
996 SEGCBLIST_KTHREAD_GP));
997 /*
998 * Lock one last time to acquire latest callback updates from kthreads
999 * so we can later handle callbacks locally without locking.
1000 */
1001 rcu_nocb_lock_irqsave(rdp, flags);
1002 /*
1003 * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY after the nocb
1004 * lock is released but how about being paranoid for once?
1005 */
1006 rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY);
1007 /*
1008 * With SEGCBLIST_SOFTIRQ_ONLY, we can't use
1009 * rcu_nocb_unlock_irqrestore() anymore.
1010 */
1011 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1012
1013 /* Sanity check */
1014 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
1015
1016
1017 return ret;
1018 }
1019
1020 int rcu_nocb_cpu_deoffload(int cpu)
1021 {
1022 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
1023 int ret = 0;
1024
1025 mutex_lock(&rcu_state.barrier_mutex);
1026 cpus_read_lock();
1027 if (rcu_rdp_is_offloaded(rdp)) {
1028 if (cpu_online(cpu)) {
1029 ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
1030 if (!ret)
1031 cpumask_clear_cpu(cpu, rcu_nocb_mask);
1032 } else {
1033 pr_info("NOCB: Can't CB-deoffload an offline CPU\n");
1034 ret = -EINVAL;
1035 }
1036 }
1037 cpus_read_unlock();
1038 mutex_unlock(&rcu_state.barrier_mutex);
1039
1040 return ret;
1041 }
1042 EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);
1043
1044 static long rcu_nocb_rdp_offload(void *arg)
1045 {
1046 struct rcu_data *rdp = arg;
1047 struct rcu_segcblist *cblist = &rdp->cblist;
1048 unsigned long flags;
1049 int ret;
1050
1051 WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
1052 /*
1053 * For now we only support re-offload, i.e., the rdp must have been
1054 * offloaded on boot first.
1055 */
1056 if (!rdp->nocb_gp_rdp)
1057 return -EINVAL;
1058
1059 pr_info("Offloading %d\n", rdp->cpu);
1060 /*
1061 * Can't use rcu_nocb_lock_irqsave() while we are in
1062 * SEGCBLIST_SOFTIRQ_ONLY mode.
1063 */
1064 raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
1065
1066 /*
1067 * We didn't take the nocb lock while working on the
1068 * rdp->cblist in SEGCBLIST_SOFTIRQ_ONLY mode.
1069 * Every modification that has been done previously on
1070 * rdp->cblist must be visible remotely to the nocb kthreads
1071 * upon wake up after reading the cblist flags.
1072 *
1073 * The layout against nocb_lock enforces that ordering:
1074 *
1075 * __rcu_nocb_rdp_offload() nocb_cb_wait()/nocb_gp_wait()
1076 * ------------------------- ----------------------------
1077 * WRITE callbacks rcu_nocb_lock()
1078 * rcu_nocb_lock() READ flags
1079 * WRITE flags READ callbacks
1080 * rcu_nocb_unlock() rcu_nocb_unlock()
1081 */
1082 ret = rdp_offload_toggle(rdp, true, flags);
1083 swait_event_exclusive(rdp->nocb_state_wq,
1084 rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
1085 rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
1086
1087 return ret;
1088 }
1089
1090 int rcu_nocb_cpu_offload(int cpu)
1091 {
1092 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
1093 int ret = 0;
1094
1095 mutex_lock(&rcu_state.barrier_mutex);
1096 cpus_read_lock();
1097 if (!rcu_rdp_is_offloaded(rdp)) {
1098 if (cpu_online(cpu)) {
1099 ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
1100 if (!ret)
1101 cpumask_set_cpu(cpu, rcu_nocb_mask);
1102 } else {
1103 pr_info("NOCB: Can't CB-offload an offline CPU\n");
1104 ret = -EINVAL;
1105 }
1106 }
1107 cpus_read_unlock();
1108 mutex_unlock(&rcu_state.barrier_mutex);
1109
1110 return ret;
1111 }
1112 EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
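/*
 * Illustrative (hypothetical) runtime use of the two exported toggles
 * above, e.g. from a test module. Note that only CPUs that were already
 * offloaded at boot via rcu_nocbs= can currently be re-offloaded:
 *
 *	ret = rcu_nocb_cpu_deoffload(cpu);	// back to softirq/rcuc processing
 *	...
 *	ret = rcu_nocb_cpu_offload(cpu);	// hand callbacks back to rcuo kthreads
 */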
1113
1114 void __init rcu_init_nohz(void)
1115 {
1116 int cpu;
1117 bool need_rcu_nocb_mask = false;
1118 struct rcu_data *rdp;
1119
1120 #if defined(CONFIG_NO_HZ_FULL)
1121 if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
1122 need_rcu_nocb_mask = true;
1123 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
1124
1125 if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
1126 if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
1127 pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
1128 return;
1129 }
1130 }
1131 if (!cpumask_available(rcu_nocb_mask))
1132 return;
1133
1134 #if defined(CONFIG_NO_HZ_FULL)
1135 if (tick_nohz_full_running)
1136 cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
1137 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
1138
1139 if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
1140 pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
1141 cpumask_and(rcu_nocb_mask, cpu_possible_mask,
1142 rcu_nocb_mask);
1143 }
1144 if (cpumask_empty(rcu_nocb_mask))
1145 pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
1146 else
1147 pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
1148 cpumask_pr_args(rcu_nocb_mask));
1149 if (rcu_nocb_poll)
1150 pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
1151
1152 for_each_cpu(cpu, rcu_nocb_mask) {
1153 rdp = per_cpu_ptr(&rcu_data, cpu);
1154 if (rcu_segcblist_empty(&rdp->cblist))
1155 rcu_segcblist_init(&rdp->cblist);
1156 rcu_segcblist_offload(&rdp->cblist, true);
1157 rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
1158 rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
1159 }
1160 rcu_organize_nocb_kthreads();
1161 }
1162
1163 /* Initialize per-rcu_data variables for no-CBs CPUs. */
1164 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
1165 {
1166 init_swait_queue_head(&rdp->nocb_cb_wq);
1167 init_swait_queue_head(&rdp->nocb_gp_wq);
1168 init_swait_queue_head(&rdp->nocb_state_wq);
1169 raw_spin_lock_init(&rdp->nocb_lock);
1170 raw_spin_lock_init(&rdp->nocb_bypass_lock);
1171 raw_spin_lock_init(&rdp->nocb_gp_lock);
1172 timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
1173 rcu_cblist_init(&rdp->nocb_bypass);
1174 }
1175
1176 /*
1177 * If the specified CPU is a no-CBs CPU that does not already have its
1178 * rcuo CB kthread, spawn it. Additionally, if the rcuo GP kthread
1179 * for this CPU's group has not yet been created, spawn it as well.
1180 */
1181 static void rcu_spawn_one_nocb_kthread(int cpu)
1182 {
1183 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
1184 struct rcu_data *rdp_gp;
1185 struct task_struct *t;
1186
1187 /*
1188 * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
1189 * then nothing to do.
1190 */
1191 if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread)
1192 return;
1193
1194 /* If we didn't spawn the GP kthread first, reorganize! */
1195 rdp_gp = rdp->nocb_gp_rdp;
1196 if (!rdp_gp->nocb_gp_kthread) {
1197 t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
1198 "rcuog/%d", rdp_gp->cpu);
1199 if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__))
1200 return;
1201 WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
1202 }
1203
1204 /* Spawn the kthread for this CPU. */
1205 t = kthread_run(rcu_nocb_cb_kthread, rdp,
1206 "rcuo%c/%d", rcu_state.abbr, cpu);
1207 if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
1208 return;
1209 WRITE_ONCE(rdp->nocb_cb_kthread, t);
1210 WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
1211 }
1212
1213 /*
1214 * If the specified CPU is a no-CBs CPU that does not already have its
1215 * rcuo kthread, spawn it.
1216 */
1217 static void rcu_spawn_cpu_nocb_kthread(int cpu)
1218 {
1219 if (rcu_scheduler_fully_active)
1220 rcu_spawn_one_nocb_kthread(cpu);
1221 }
1222
1223 /*
1224 * Once the scheduler is running, spawn rcuo kthreads for all online
1225 * no-CBs CPUs. This assumes that the early_initcall()s happen before
1226 * non-boot CPUs come online -- if this changes, we will need to add
1227 * some mutual exclusion.
1228 */
1229 static void __init rcu_spawn_nocb_kthreads(void)
1230 {
1231 int cpu;
1232
1233 for_each_online_cpu(cpu)
1234 rcu_spawn_cpu_nocb_kthread(cpu);
1235 }
1236
1237 /* How many CB CPU IDs per GP kthread? Default of -1 for sqrt(nr_cpu_ids). */
1238 static int rcu_nocb_gp_stride = -1;
1239 module_param(rcu_nocb_gp_stride, int, 0444);
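/*
 * For example, with the default stride of -1 and 64 possible CPUs,
 * rcu_organize_nocb_kthreads() below computes ls = 64 / int_sqrt(64) = 8,
 * so (assuming all of these CPUs are in rcu_nocb_mask) CPUs 0-7 are
 * grouped under rcuog/0, CPUs 8-15 under rcuog/8, and so on.
 */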
1240
1241 /*
1242 * Initialize GP-CB relationships for all no-CBs CPUs.
1243 */
1244 static void __init rcu_organize_nocb_kthreads(void)
1245 {
1246 int cpu;
1247 bool firsttime = true;
1248 bool gotnocbs = false;
1249 bool gotnocbscbs = true;
1250 int ls = rcu_nocb_gp_stride;
1251 int nl = 0; /* Next GP kthread. */
1252 struct rcu_data *rdp;
1253 struct rcu_data *rdp_gp = NULL; /* Suppress misguided gcc warn. */
1254 struct rcu_data *rdp_prev = NULL;
1255
1256 if (!cpumask_available(rcu_nocb_mask))
1257 return;
1258 if (ls == -1) {
1259 ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
1260 rcu_nocb_gp_stride = ls;
1261 }
1262
1263 /*
1264 * Each pass through this loop sets up one rcu_data structure.
1265 * Should the corresponding CPU come online in the future, then
1266 * we will spawn the needed set of rcu_nocb_kthread() kthreads.
1267 */
1268 for_each_cpu(cpu, rcu_nocb_mask) {
1269 rdp = per_cpu_ptr(&rcu_data, cpu);
1270 if (rdp->cpu >= nl) {
1271 /* New GP kthread, set up for CBs & next GP. */
1272 gotnocbs = true;
1273 nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
1274 rdp->nocb_gp_rdp = rdp;
1275 rdp_gp = rdp;
1276 if (dump_tree) {
1277 if (!firsttime)
1278 pr_cont("%s\n", gotnocbscbs
1279 ? "" : " (self only)");
1280 gotnocbscbs = false;
1281 firsttime = false;
1282 pr_alert("%s: No-CB GP kthread CPU %d:",
1283 __func__, cpu);
1284 }
1285 } else {
1286 /* Another CB kthread, link to previous GP kthread. */
1287 gotnocbscbs = true;
1288 rdp->nocb_gp_rdp = rdp_gp;
1289 rdp_prev->nocb_next_cb_rdp = rdp;
1290 if (dump_tree)
1291 pr_cont(" %d", cpu);
1292 }
1293 rdp_prev = rdp;
1294 }
1295 if (gotnocbs && dump_tree)
1296 pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
1297 }
1298
1299 /*
1300 * Bind the current task to the offloaded CPUs. If there are no offloaded
1301 * CPUs, leave the task unbound. Splat if the bind attempt fails.
1302 */
1303 void rcu_bind_current_to_nocb(void)
1304 {
1305 if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
1306 WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
1307 }
1308 EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
1309
1310 // The ->on_cpu field is available only in CONFIG_SMP=y, so...
1311 #ifdef CONFIG_SMP
1312 static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
1313 {
1314 return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
1315 }
1316 #else // #ifdef CONFIG_SMP
1317 static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
1318 {
1319 return "";
1320 }
1321 #endif // #else #ifdef CONFIG_SMP
1322
1323 /*
1324 * Dump out nocb grace-period kthread state for the specified rcu_data
1325 * structure.
1326 */
1327 static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
1328 {
1329 struct rcu_node *rnp = rdp->mynode;
1330
1331 pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
1332 rdp->cpu,
1333 "kK"[!!rdp->nocb_gp_kthread],
1334 "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
1335 "dD"[!!rdp->nocb_defer_wakeup],
1336 "tT"[timer_pending(&rdp->nocb_timer)],
1337 "sS"[!!rdp->nocb_gp_sleep],
1338 ".W"[swait_active(&rdp->nocb_gp_wq)],
1339 ".W"[swait_active(&rnp->nocb_gp_wq[0])],
1340 ".W"[swait_active(&rnp->nocb_gp_wq[1])],
1341 ".B"[!!rdp->nocb_gp_bypass],
1342 ".G"[!!rdp->nocb_gp_gp],
1343 (long)rdp->nocb_gp_seq,
1344 rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops),
1345 rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.',
1346 rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
1347 show_rcu_should_be_on_cpu(rdp->nocb_gp_kthread));
1348 }
1349
1350 /* Dump out nocb kthread state for the specified rcu_data structure. */
1351 static void show_rcu_nocb_state(struct rcu_data *rdp)
1352 {
1353 char bufw[20];
1354 char bufr[20];
1355 struct rcu_segcblist *rsclp = &rdp->cblist;
1356 bool waslocked;
1357 bool wassleep;
1358
1359 if (rdp->nocb_gp_rdp == rdp)
1360 show_rcu_nocb_gp_state(rdp);
1361
1362 sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
1363 sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
1364 pr_info(" CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
1365 rdp->cpu, rdp->nocb_gp_rdp->cpu,
1366 rdp->nocb_next_cb_rdp ? rdp->nocb_next_cb_rdp->cpu : -1,
1367 "kK"[!!rdp->nocb_cb_kthread],
1368 "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
1369 "cC"[!!atomic_read(&rdp->nocb_lock_contended)],
1370 "lL"[raw_spin_is_locked(&rdp->nocb_lock)],
1371 "sS"[!!rdp->nocb_cb_sleep],
1372 ".W"[swait_active(&rdp->nocb_cb_wq)],
1373 jiffies - rdp->nocb_bypass_first,
1374 jiffies - rdp->nocb_nobypass_last,
1375 rdp->nocb_nobypass_count,
1376 ".D"[rcu_segcblist_ready_cbs(rsclp)],
1377 ".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)],
1378 rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw,
1379 ".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)],
1380 rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr,
1381 ".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)],
1382 ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
1383 rcu_segcblist_n_cbs(&rdp->cblist),
1384 rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.',
1385 rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1,
1386 show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
1387
1388 /* It is OK for GP kthreads to have GP state. */
1389 if (rdp->nocb_gp_rdp == rdp)
1390 return;
1391
1392 waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
1393 wassleep = swait_active(&rdp->nocb_gp_wq);
1394 if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
1395 return; /* Nothing untoward. */
1396
1397 pr_info(" nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
1398 "lL"[waslocked],
1399 "dD"[!!rdp->nocb_defer_wakeup],
1400 "sS"[!!rdp->nocb_gp_sleep],
1401 ".W"[wassleep]);
1402 }
1403
1404 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
1405
1406 static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
1407 {
1408 return 0;
1409 }
1410
1411 static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
1412 {
1413 return false;
1414 }
1415
1416 /* No ->nocb_lock to acquire. */
1417 static void rcu_nocb_lock(struct rcu_data *rdp)
1418 {
1419 }
1420
1421 /* No ->nocb_lock to release. */
1422 static void rcu_nocb_unlock(struct rcu_data *rdp)
1423 {
1424 }
1425
1426 /* No ->nocb_lock to release. */
1427 static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
1428 unsigned long flags)
1429 {
1430 local_irq_restore(flags);
1431 }
1432
1433 /* Lockdep check that ->cblist may be safely accessed. */
1434 static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
1435 {
1436 lockdep_assert_irqs_disabled();
1437 }
1438
1439 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
1440 {
1441 }
1442
1443 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
1444 {
1445 return NULL;
1446 }
1447
1448 static void rcu_init_one_nocb(struct rcu_node *rnp)
1449 {
1450 }
1451
1452 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
1453 unsigned long j)
1454 {
1455 return true;
1456 }
1457
1458 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
1459 bool *was_alldone, unsigned long flags)
1460 {
1461 return false;
1462 }
1463
1464 static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
1465 unsigned long flags)
1466 {
1467 WARN_ON_ONCE(1); /* Should be dead code! */
1468 }
1469
1470 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
1471 {
1472 }
1473
1474 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
1475 {
1476 return false;
1477 }
1478
1479 static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
1480 {
1481 return false;
1482 }
1483
1484 static void rcu_spawn_cpu_nocb_kthread(int cpu)
1485 {
1486 }
1487
1488 static void __init rcu_spawn_nocb_kthreads(void)
1489 {
1490 }
1491
1492 static void show_rcu_nocb_state(struct rcu_data *rdp)
1493 {
1494 }
1495
1496 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */