// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif

#define RCU_STALL_MIGHT_DIV	8
#define RCU_STALL_MIGHT_MIN	(2 * HZ)

/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);

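/*
 * Worked example (added commentary, not from the original source): with
 * the default CONFIG_RCU_CPU_STALL_TIMEOUT of 21 seconds and HZ=1000,
 * the function above returns 21 * 1000 = 21000 jiffies, plus a further
 * 5 * HZ of RCU_STALL_DELAY_DELTA slack when CONFIG_PROVE_RCU's
 * debugging overhead is enabled.
 */
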
/**
 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
 *
 * Returns @true if the current grace period is sufficiently old that
 * it is reasonable to assume that it might be stalled.  This can be
 * useful when deciding whether to allocate memory to enable RCU-mediated
 * freeing on the one hand or just invoking synchronize_rcu() on the other.
 * The latter is preferable when the grace period is stalled.
 *
 * Note that sampling of the .gp_start and .gp_seq fields must be done
 * carefully to avoid false positives at the beginnings and ends of
 * grace periods.
 */
bool rcu_gp_might_be_stalled(void)
{
	unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
	unsigned long j = jiffies;

	if (d < RCU_STALL_MIGHT_MIN)
		d = RCU_STALL_MIGHT_MIN;
	smp_mb(); // jiffies before .gp_seq to avoid false positives.
	if (!rcu_gp_in_progress())
		return false;
	// Long delays at this point avoid false positives, but a delay
	// of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
	smp_mb(); // .gp_seq before second .gp_start
	// And ditto here.
	return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
}

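/*
 * Hypothetical usage sketch (added, not from the original source), per
 * the header comment above: a caller about to free memory might prefer
 *
 *	if (rcu_gp_might_be_stalled()) {
 *		synchronize_rcu();	// GP stalled: wait, then free inline.
 *		kfree(p);
 *	} else {
 *		kfree_rcu(p, rh);	// GP healthy: defer via RCU.
 *	}
 *
 * where p points to a structure with an rcu_head field named rh.
 */
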
/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

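/*
 * Added note (inferred from the code above and rcu_panic() below):
 * rcu_cpu_stall_suppress is effectively a tri-state flag -- 0 means
 * stall warnings are enabled, 1 means suppressed due to panic, and
 * 2 means temporarily suppressed for the duration of a sysrq printout.
 */
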
/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}

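/*
 * Added note (inferred): jiffies + ULONG_MAX / 2 is the farthest-future
 * value that the wraparound-safe time_after()/ULONG_CMP_GE() comparisons
 * can still order correctly, which is why it serves as "never" here.
 */
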
//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	WRITE_ONCE(rcu_state.gp_start, j);
	j1 = rcu_jiffies_till_stall_check();
	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}

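/*
 * Added timeline sketch (inferred from the assignments above): with the
 * grace period starting at jiffies value j and a stall timeout of j1,
 *
 *	j ............... j + j1/2 ................. j + j1
 *	GP start          begin resched nudging      emit stall warning
 */
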
/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!rcu_kick_kthreads)
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}

/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

// Communicate task state back to the RCU CPU stall warning request.
struct rcu_stall_chk_rdr {
	int nesting;
	union rcu_special rs;
	bool on_blkd_list;
};

/*
 * Report out the state of a not-running task that is stalling the
 * current RCU grace period.
 */
static bool check_slow_task(struct task_struct *t, void *arg)
{
	struct rcu_stall_chk_rdr *rscrp = arg;

	if (task_curr(t))
		return false; // It is running, so decline to inspect it.
	rscrp->nesting = t->rcu_read_lock_nesting;
	rscrp->rs = t->rcu_read_unlock_special;
	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
	return true;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	int ndetected = 0;
	struct rcu_stall_chk_rdr rscr;
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
			pr_cont(" P%d", t->pid);
		else
			pr_cont(" P%d/%d:%c%c%c%c",
				t->pid, rscr.nesting,
				".b"[rscr.rs.b.blocked],
				".q"[rscr.rs.b.need_qs],
				".e"[rscr.rs.b.exp_hint],
				".l"[rscr.on_blkd_list]);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}

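/*
 * Added decoding note (illustrative, not from the original source): an
 * entry such as "P12/1:b..l" names task 12 with rcu_read_lock_nesting
 * of 1, where each of the four flag positions shows its letter when set
 * and '.' when clear: blocked, need_qs, exp_hint, and on_blkd_list.
 */
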
#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
				if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d",
		rdp->last_accelerate & 0xffff, jiffies & 0xffff,
		!!rdp->tick_nohz_enabled_snap);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

static const char * const gp_state_names[] = {
	[RCU_GP_IDLE] = "RCU_GP_IDLE",
	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
	[RCU_GP_INIT] = "RCU_GP_INIT",
	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

/* Is the RCU grace-period kthread being starved of CPU time? */
static bool rcu_is_gp_kthread_starving(unsigned long *jp)
{
	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);

	if (jp)
		*jp = j;
	return j > 2 * HZ;
}

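/*
 * Added note (inferred from the code above): "starving" here means no
 * recorded grace-period kthread activity for more than two seconds
 * (2 * HZ jiffies), and the optional *jp out-parameter reports how long
 * the kthread has actually been inactive.
 */
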
/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	bool falsepositive;
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
			rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s%s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
			"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(rdp) & 0xfff,
	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       fast_no_hz,
	       falsepositive ? " (false positive?)" : "");
}

/* Complain about starvation of grace-period kthread. */
static void rcu_check_gp_kthread_starvation(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	if (rcu_is_gp_kthread_starving(&j)) {
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(rcu_state.gp_flags),
		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
		       gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
		if (gpk) {
			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			wake_up_process(gpk);
		}
	}
}

static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		ndetected += rcu_print_task_stall(rnp);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
	       smp_processor_id(), (long)(jiffies - gps),
	       (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = data_race(rcu_state.gp_activity);
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       data_race(jiffies_till_next_fqs),
			       rcu_get_root()->qsmask);
			/* In this case, the current CPU might be at fault. */
			sched_show_task(current);
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state();  /* Kick them all. */
}

static void print_cpu_stall(unsigned long gps)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
		jiffies - gps,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	if ((rcu_stall_is_suppressed() && !rcu_kick_kthreads) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(gps);
		if (rcu_cpu_stall_ftrace_dump)
			rcu_ftrace_dump(DUMP_ALL);

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2, gps);
		if (rcu_cpu_stall_ftrace_dump)
			rcu_ftrace_dump(DUMP_ALL);
	}
}

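/*
 * Added note (inferred from check_cpu_stall() above): the cmpxchg() on
 * rcu_state.jiffies_stall both pushes the next warning well into the
 * future and guarantees that, of all the CPUs racing to report the same
 * stall, exactly one wins and prints the report.
 */
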
//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including that of callback invocation.


/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	unsigned long cbs = 0;
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	j = jiffies;
	ja = j - data_race(rcu_state.gp_activity);
	jr = j - data_race(rcu_state.gp_req_activity);
	jw = j - data_race(rcu_state.gp_wake_time);
	pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		rcu_state.gp_state, t ? t->state : 0x1ffffL,
		ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
		(long)data_race(rcu_state.gp_seq),
		(long)data_race(rcu_get_root()->gp_seq_needed),
		data_race(rcu_state.gp_flags));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
				 READ_ONCE(rnp->gp_seq_needed)))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
			rnp->grplo, rnp->grphi, (long)data_race(rnp->gp_seq),
			(long)data_race(rnp->gp_seq_needed));
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (READ_ONCE(rdp->gpwrap) ||
			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
					 READ_ONCE(rdp->gp_seq_needed)))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)data_race(rdp->gp_seq_needed));
		}
	}
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		cbs += data_race(rdp->n_cbs_invoked);
		if (rcu_segcblist_is_offloaded(&rdp->cblist))
			show_rcu_nocb_state(rdp);
	}
	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
	show_rcu_tasks_gp_kthreads();
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp)
			/* irqs remain disabled. */
			raw_spin_unlock_rcu_node(rnp_root);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}

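/*
 * Added note (inferred from the code above): the same set of conditions
 * is evaluated three times -- locklessly, under the leaf rcu_node lock,
 * and under the root rcu_node lock -- so that the expensive locks are
 * taken only when a real complaint is likely, and so that atomic_xchg()
 * lets only the first CPU through to WARN and dump kthread state.
 */
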
/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - rcu_state.gp_start);
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - rcu_state.gp_end);
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static const struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);

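/*
 * Added usage note (the exact parameter prefix is an assumption, since
 * it depends on which file #includes this one): enable the key at boot
 * with sysrq_rcu=1 (prefixed by the containing module's name on the
 * kernel command line), then trigger the dump at runtime with
 * "echo y > /proc/sysrq-trigger".
 */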