kernel/rcu/tree_stall.h
// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif

/* Limit-check stall timeouts specified at boottime and runtime. */
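/*
 * For example, with HZ=1000 and rcu_cpu_stall_timeout set to 21, this
 * returns 21000 jiffies (21 seconds), plus an extra 5*HZ of slack when
 * CONFIG_PROVE_RCU adds debugging overhead.
 */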
int rcu_jiffies_till_stall_check(void)
{
        int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

        /*
         * Limit check must be consistent with the Kconfig limits
         * for CONFIG_RCU_CPU_STALL_TIMEOUT.
         */
        if (till_stall_check < 3) {
                WRITE_ONCE(rcu_cpu_stall_timeout, 3);
                till_stall_check = 3;
        } else if (till_stall_check > 300) {
                WRITE_ONCE(rcu_cpu_stall_timeout, 300);
                till_stall_check = 300;
        }
        return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);

/* Don't do RCU CPU stall warnings during long sysrq printouts. */
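/*
 * The value 2 marks sysrq-initiated suppression, so that rcu_sysrq_end()
 * does not clear suppression that was requested by other means (which
 * uses the value 1).
 */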
void rcu_sysrq_start(void)
{
        if (!rcu_cpu_stall_suppress)
                rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
        if (rcu_cpu_stall_suppress == 2)
                rcu_cpu_stall_suppress = 0;
}

/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
        rcu_cpu_stall_suppress = 1;
        return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
        .notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
        return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
        if (sysctl_panic_on_rcu_stall)
                panic("RCU Stall\n");
}

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
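        /*
         * jiffies + ULONG_MAX / 2 is the farthest future time that the
         * wraparound-safe ULONG_CMP_GE() comparisons still treat as
         * being after the current time.
         */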
        WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}

//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
        unsigned long j = jiffies;
        unsigned long j1;

        rcu_state.gp_start = j;
        j1 = rcu_jiffies_till_stall_check();
        /* Record ->gp_start before ->jiffies_stall. */
        smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
        rcu_state.jiffies_resched = j + j1 / 2;
        rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
        rdp->ticks_this_gp = 0;
        rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
        WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
        unsigned long j;

        if (!rcu_kick_kthreads)
                return;
        j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
        if (time_after(jiffies, j) && rcu_state.gp_kthread &&
            (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
                WARN_ONCE(1, "Kicking %s grace-period kthread\n",
                          rcu_state.name);
                rcu_ftrace_dump(DUMP_ALL);
                wake_up_process(rcu_state.gp_kthread);
                WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
        }
}

/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling. Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        rdp = container_of(iwp, struct rcu_data, rcu_iw);
        rnp = rdp->mynode;
        raw_spin_lock_rcu_node(rnp);
        if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
                rdp->rcu_iw_gp_seq = rnp->gp_seq;
                rdp->rcu_iw_pending = false;
        }
        raw_spin_unlock_rcu_node(rnp);
}

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPTION

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
        unsigned long flags;
        struct task_struct *t;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!rcu_preempt_blocked_readers_cgp(rnp)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
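        /*
         * Back up one list entry from ->gp_tasks so that the
         * list_for_each_entry_continue() loop below starts its scan
         * at ->gp_tasks itself rather than at the entry after it.
         */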
        t = list_entry(rnp->gp_tasks->prev,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                /*
                 * We could be printing a lot while holding a spinlock.
                 * Avoid triggering hard lockup.
                 */
                touch_nmi_watchdog();
                sched_show_task(t);
        }
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
        struct task_struct *t;
        int ndetected = 0;

        if (!rcu_preempt_blocked_readers_cgp(rnp))
                return 0;
        pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
               rnp->level, rnp->grplo, rnp->grphi);
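        /* As above, back up one entry so the scan starts at ->gp_tasks. */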
        t = list_entry(rnp->gp_tasks->prev,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                pr_cont(" P%d", t->pid);
                ndetected++;
        }
        pr_cont("\n");
        return ndetected;
}

#else /* #ifdef CONFIG_PREEMPTION */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
        return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPTION */

/*
 * Dump stacks of all tasks running on stalled CPUs. First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps. The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
        int cpu;
        unsigned long flags;
        struct rcu_node *rnp;

        rcu_for_each_leaf_node(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                for_each_leaf_node_possible_cpu(rnp, cpu)
                        if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
                                if (!trigger_single_cpu_backtrace(cpu))
                                        dump_cpu_task(cpu);
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
}

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

        sprintf(cp, "last_accelerate: %04lx/%04lx, Nonlazy posted: %c%c%c",
                rdp->last_accelerate & 0xffff, jiffies & 0xffff,
                ".l"[rdp->all_lazy],
                ".L"[!rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)],
                ".D"[!!rdp->tick_nohz_enabled_snap]);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
        *cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

static const char * const gp_state_names[] = {
        [RCU_GP_IDLE] = "RCU_GP_IDLE",
        [RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
        [RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
        [RCU_GP_ONOFF] = "RCU_GP_ONOFF",
        [RCU_GP_INIT] = "RCU_GP_INIT",
        [RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
        [RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
        [RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
        [RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
        if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
                return "???";
        return gp_state_names[gs];
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware. Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(int cpu)
{
        unsigned long delta;
        char fast_no_hz[72];
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
        char *ticks_title;
        unsigned long ticks_value;

        /*
         * We could be printing a lot while holding a spinlock. Avoid
         * triggering hard lockup.
         */
        touch_nmi_watchdog();

        ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
        if (ticks_value) {
                ticks_title = "GPs behind";
        } else {
                ticks_title = "ticks this GP";
                ticks_value = rdp->ticks_this_gp;
        }
        print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
        delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
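        /*
         * Each "XY"[cond] expression below indexes a two-character
         * string with a 0/1 condition, printing the first character
         * when the condition is false and the second when it is true
         * (e.g. "O."[!!cpu_online(cpu)] prints 'O' only when the CPU
         * is offline).
         */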
        pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
               cpu,
               "O."[!!cpu_online(cpu)],
               "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
               "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
               !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
                        rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
                        "!."[!delta],
               ticks_value, ticks_title,
               rcu_dynticks_snap(rdp) & 0xfff,
               rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
               rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
               READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
               fast_no_hz);
}

/* Complain about starvation of grace-period kthread. */
static void rcu_check_gp_kthread_starvation(void)
{
        struct task_struct *gpk = rcu_state.gp_kthread;
        unsigned long j;

        j = jiffies - READ_ONCE(rcu_state.gp_activity);
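        /* Complain only if the kthread has been inactive for at least two seconds. */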
        if (j > 2 * HZ) {
                pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
                       rcu_state.name, j,
                       (long)rcu_seq_current(&rcu_state.gp_seq),
                       READ_ONCE(rcu_state.gp_flags),
                       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
                       gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
                if (gpk) {
                        pr_err("RCU grace-period kthread stack dump:\n");
                        sched_show_task(gpk);
                        wake_up_process(gpk);
                }
        }
}

static void print_other_cpu_stall(unsigned long gp_seq)
{
        int cpu;
        unsigned long flags;
        unsigned long gpa;
        unsigned long j;
        int ndetected = 0;
        struct rcu_node *rnp;
        long totqlen = 0;

        /* Kick and suppress, if so configured. */
        rcu_stall_kick_kthreads();
        if (rcu_cpu_stall_suppress)
                return;

        /*
         * OK, time to rat on our buddy...
         * See Documentation/RCU/stallwarn.txt for info on how to debug
         * RCU CPU stall warnings.
         */
        pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
        rcu_for_each_leaf_node(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                ndetected += rcu_print_task_stall(rnp);
                if (rnp->qsmask != 0) {
                        for_each_leaf_node_possible_cpu(rnp, cpu)
                                if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
                                        print_cpu_stall_info(cpu);
                                        ndetected++;
                                }
                }
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }

        for_each_possible_cpu(cpu)
                totqlen += rcu_get_n_cbs_cpu(cpu);
        pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
                smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
                (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
        if (ndetected) {
                rcu_dump_cpu_stacks();

                /* Complain about tasks blocking the grace period. */
                rcu_for_each_leaf_node(rnp)
                        rcu_print_detail_task_stall_rnp(rnp);
        } else {
                if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
                        pr_err("INFO: Stall ended before state dump start\n");
                } else {
                        j = jiffies;
                        gpa = READ_ONCE(rcu_state.gp_activity);
                        pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
                               rcu_state.name, j - gpa, j, gpa,
                               READ_ONCE(jiffies_till_next_fqs),
                               rcu_get_root()->qsmask);
                        /* In this case, the current CPU might be at fault. */
                        sched_show_task(current);
                }
        }
        /* Rewrite if needed in case of slow consoles. */
        if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
                WRITE_ONCE(rcu_state.jiffies_stall,
                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

        rcu_check_gp_kthread_starvation();

        panic_on_rcu_stall();

        rcu_force_quiescent_state(); /* Kick them all. */
}

static void print_cpu_stall(void)
{
        int cpu;
        unsigned long flags;
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rcu_get_root();
        long totqlen = 0;

        /* Kick and suppress, if so configured. */
        rcu_stall_kick_kthreads();
        if (rcu_cpu_stall_suppress)
                return;

        /*
         * OK, time to rat on ourselves...
         * See Documentation/RCU/stallwarn.txt for info on how to debug
         * RCU CPU stall warnings.
         */
        pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
        raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
        print_cpu_stall_info(smp_processor_id());
        raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
        for_each_possible_cpu(cpu)
                totqlen += rcu_get_n_cbs_cpu(cpu);
        pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
                jiffies - rcu_state.gp_start,
                (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

        rcu_check_gp_kthread_starvation();

        rcu_dump_cpu_stacks();

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        /* Rewrite if needed in case of slow consoles. */
        if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
                WRITE_ONCE(rcu_state.jiffies_stall,
                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        panic_on_rcu_stall();

        /*
         * Attempt to revive the RCU machinery by forcing a context switch.
         *
         * A context switch would normally allow the RCU state machine to make
         * progress and it could be we're stuck in kernel space without context
         * switches for an entirely unreasonable amount of time.
         */
        set_tsk_need_resched(current);
        set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
        unsigned long gs1;
        unsigned long gs2;
        unsigned long gps;
        unsigned long j;
        unsigned long jn;
        unsigned long js;
        struct rcu_node *rnp;

        if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
            !rcu_gp_in_progress())
                return;
        rcu_stall_kick_kthreads();
        j = jiffies;

        /*
         * Lots of memory barriers to reject false positives.
         *
         * The idea is to pick up rcu_state.gp_seq, then
         * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
         * another copy of rcu_state.gp_seq. These values are updated in
         * the opposite order with memory barriers (or equivalent) during
         * grace-period initialization and cleanup. Now, a false positive
         * can occur if we get a new value of rcu_state.gp_start and an old
         * value of rcu_state.jiffies_stall. But given the memory barriers,
         * the only way that this can happen is if one grace period ends
         * and another starts between these two fetches. This is detected
         * by comparing the second fetch of rcu_state.gp_seq with the
         * previous fetch from rcu_state.gp_seq.
         *
         * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
         * and rcu_state.gp_start suffice to forestall false positives.
         */
        gs1 = READ_ONCE(rcu_state.gp_seq);
        smp_rmb(); /* Pick up ->gp_seq first... */
        js = READ_ONCE(rcu_state.jiffies_stall);
        smp_rmb(); /* ...then ->jiffies_stall before the rest... */
        gps = READ_ONCE(rcu_state.gp_start);
        smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
        gs2 = READ_ONCE(rcu_state.gp_seq);
        if (gs1 != gs2 ||
            ULONG_CMP_LT(j, js) ||
            ULONG_CMP_GE(gps, js))
                return; /* No stall or GP completed since entering function. */
        rnp = rdp->mynode;
        jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
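        /*
         * The cmpxchg() calls below let only one CPU win the right to
         * report a given stall, and push ->jiffies_stall into the future
         * to rate-limit subsequent warnings for the same grace period.
         */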
        if (rcu_gp_in_progress() &&
            (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
            cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

                /* We haven't checked in, so go dump stack. */
                print_cpu_stall();
                if (rcu_cpu_stall_ftrace_dump)
                        rcu_ftrace_dump(DUMP_ALL);

        } else if (rcu_gp_in_progress() &&
                   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
                   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

                /* They had a few time units to dump stack, so complain. */
                print_other_cpu_stall(gs2);
                if (rcu_cpu_stall_ftrace_dump)
                        rcu_ftrace_dump(DUMP_ALL);
        }
}

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including that of callback invocation.


/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
        int cpu;
        unsigned long j;
        unsigned long ja;
        unsigned long jr;
        unsigned long jw;
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        j = jiffies;
        ja = j - READ_ONCE(rcu_state.gp_activity);
        jr = j - READ_ONCE(rcu_state.gp_req_activity);
        jw = j - READ_ONCE(rcu_state.gp_wake_time);
        pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
                rcu_state.name, gp_state_getname(rcu_state.gp_state),
                rcu_state.gp_state,
                rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL,
                ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
                (long)READ_ONCE(rcu_state.gp_seq),
                (long)READ_ONCE(rcu_get_root()->gp_seq_needed),
                READ_ONCE(rcu_state.gp_flags));
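        /* Print only rcu_node structures with unsatisfied grace-period requests. */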
        rcu_for_each_node_breadth_first(rnp) {
                if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
                        continue;
                pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
                        rnp->grplo, rnp->grphi, (long)rnp->gp_seq,
                        (long)rnp->gp_seq_needed);
                if (!rcu_is_leaf_node(rnp))
                        continue;
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        rdp = per_cpu_ptr(&rcu_data, cpu);
                        if (rdp->gpwrap ||
                            ULONG_CMP_GE(rcu_state.gp_seq,
                                         rdp->gp_seq_needed))
                                continue;
                        pr_info("\tcpu %d ->gp_seq_needed %ld\n",
                                cpu, (long)rdp->gp_seq_needed);
                }
        }
        for_each_possible_cpu(cpu) {
                rdp = per_cpu_ptr(&rcu_data, cpu);
                if (rcu_segcblist_is_offloaded(&rdp->cblist))
                        show_rcu_nocb_state(rdp);
        }
        /* sched_show_task(rcu_state.gp_kthread); */
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
                                     const unsigned long gpssdelay)
{
        unsigned long flags;
        unsigned long j;
        struct rcu_node *rnp_root = rcu_get_root();
        static atomic_t warned = ATOMIC_INIT(0);

        if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
            ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
                return;
        j = jiffies; /* Expensive access, and in common case don't get here. */
        if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
            time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
            atomic_read(&warned))
                return;

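        /*
         * The lockless checks above can race with a grace period starting,
         * so repeat them under the leaf rcu_node lock, and then once more
         * under the root rcu_node lock, before concluding that a needed
         * grace period really has failed to start.
         */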
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        j = jiffies;
        if (rcu_gp_in_progress() ||
            ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
            time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
            time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
            atomic_read(&warned)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        /* Hold onto the leaf lock to make others see warned==1. */

        if (rnp_root != rnp)
                raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
        j = jiffies;
        if (rcu_gp_in_progress() ||
            ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
            time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
            time_before(j, rcu_state.gp_activity + gpssdelay) ||
            atomic_xchg(&warned, 1)) {
                if (rnp_root != rnp)
                        /* irqs remain disabled. */
                        raw_spin_unlock_rcu_node(rnp_root);
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        WARN_ON(1);
        if (rnp_root != rnp)
                raw_spin_unlock_rcu_node(rnp_root);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        show_rcu_gp_kthreads();
}

/*
 * Do a forward-progress check for rcutorture. This is normally invoked
 * due to an OOM event. The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
        unsigned long cbs;
        int cpu;
        unsigned long max_cbs = 0;
        int max_cpu = -1;
        struct rcu_data *rdp;

        if (rcu_gp_in_progress()) {
                pr_info("%s: GP age %lu jiffies\n",
                        __func__, jiffies - rcu_state.gp_start);
                show_rcu_gp_kthreads();
        } else {
                pr_info("%s: Last GP end %lu jiffies ago\n",
                        __func__, jiffies - rcu_state.gp_end);
                preempt_disable();
                rdp = this_cpu_ptr(&rcu_data);
                rcu_check_gp_start_stall(rdp->mynode, rdp, j);
                preempt_enable();
        }
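        /* Print the callback count for each CPU that has any, noting the max. */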
        for_each_possible_cpu(cpu) {
                cbs = rcu_get_n_cbs_cpu(cpu);
                if (!cbs)
                        continue;
                if (max_cpu < 0)
                        pr_info("%s: callbacks", __func__);
                pr_cont(" %d: %lu", cpu, cbs);
                if (cbs <= max_cbs)
                        continue;
                max_cbs = cbs;
                max_cpu = cpu;
        }
        if (max_cpu >= 0)
                pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
        show_rcu_gp_kthreads();
}

static struct sysrq_key_op sysrq_rcudump_op = {
        .handler = sysrq_show_rcu,
        .help_msg = "show-rcu(y)",
        .action_msg = "Show RCU tree",
        .enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
        if (sysrq_rcu)
                return register_sysrq_key('y', &sysrq_rcudump_op);
        return 0;
}
early_initcall(rcu_sysrq_init);