// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)
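
/*
 * A NULL seq_file selects the pr_cont() console path; that is how
 * sysrq_sched_debug_show() below reuses all of the print helpers.
 */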

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
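
/*
 * SPLIT_NS() expands to two printf arguments for a "%Ld.%06ld" format:
 * the value in whole milliseconds, then the sub-millisecond remainder
 * in nanoseconds, so e.g. 1234567 ns is rendered as "1.234567".
 */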

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
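
/*
 * features.h is expanded twice via the SCHED_FEAT() x-macro: once here
 * for the name strings and, under CONFIG_JUMP_LABEL below, once more
 * for the static key initial values, so both tables stay in sync with
 * the feature list by construction.
 */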

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */
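
/*
 * Parse one feature token as written to the debugfs file: a leading
 * "NO_" clears the named feature, otherwise it is set. Returns 0 on
 * success or a negative errno (-EINVAL from match_string() for an
 * unknown name).
 */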
static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}
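
/*
 * Handles writes such as "echo NO_WAKEUP_PREEMPTION >
 * /sys/kernel/debug/sched_features". The static keys are flipped with
 * cpus_read_lock() held, as the *_cpuslocked() helpers above require,
 * and under the inode lock to serialize concurrent writers.
 */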
static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

__read_mostly bool sched_debug_enabled;

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	debugfs_create_bool("sched_debug", 0644, NULL,
			&sched_debug_enabled);

	return 0;
}
late_initcall(sched_init_debug);
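
/*
 * Both files are created in the debugfs root (NULL parent), i.e.
 * /sys/kernel/debug/sched_features and /sys/kernel/debug/sched_debug
 * with a standard debugfs mount.
 */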

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};
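
/*
 * The two static tables above anchor the dynamically allocated
 * /proc/sys/kernel/sched_domain/cpuN/domainM/ hierarchy built by the
 * functions below.
 */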

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;
}

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(9);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval",	  &sd->min_interval,	    sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[1], "max_interval",	  &sd->max_interval,	    sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[2], "busy_factor",	  &sd->busy_factor,	    sizeof(int),  0644, proc_dointvec_minmax);
	set_table_entry(&table[3], "imbalance_pct",	  &sd->imbalance_pct,	    sizeof(int),  0644, proc_dointvec_minmax);
	set_table_entry(&table[4], "cache_nice_tries",	  &sd->cache_nice_tries,    sizeof(int),  0644, proc_dointvec_minmax);
	set_table_entry(&table[5], "flags",		  &sd->flags,		    sizeof(int),  0444, proc_dointvec_minmax);
	set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[7], "name",		  sd->name,	       CORENAME_MAX_SIZE, 0444, proc_dostring);
	/* &table[8] is terminator */

	return table;
}
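
/*
 * "flags" and "name" are 0444 on purpose: both describe the domain
 * topology and are not meant to be changed through sysctl.
 */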

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}
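
/*
 * sd_sysctl_cpus tracks which CPUs have stale sysctl directories;
 * dirty_sched_domain_sysctl() marks a CPU when its domains change and
 * register_sched_domain_sysctl() rebuilds only the marked entries.
 */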
static cpumask_var_t		sd_sysctl_cpus;
static struct ctl_table_header	*sd_sysctl_header;

void register_sched_domain_sysctl(void)
{
	static struct ctl_table *cpu_entries;
	static struct ctl_table **cpu_idx;
	static bool init_done = false;
	char buf[32];
	int i;

	if (!cpu_entries) {
		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
		if (!cpu_entries)
			return;

		WARN_ON(sd_ctl_dir[0].child);
		sd_ctl_dir[0].child = cpu_entries;
	}

	if (!cpu_idx) {
		struct ctl_table *e = cpu_entries;

		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
		if (!cpu_idx)
			return;

		/* deal with sparse possible map */
		for_each_possible_cpu(i) {
			cpu_idx[i] = e;
			e++;
		}
	}

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
	}

	if (!init_done) {
		init_done = true;
		/* init to possible to not have holes in @cpu_entries */
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	for_each_cpu(i, sd_sysctl_cpus) {
		struct ctl_table *e = cpu_idx[i];

		if (e->child)
			sd_free_ctl_entry(&e->child);

		if (!e->procname) {
			snprintf(buf, 32, "cpu%d", i);
			e->procname = kstrdup(buf, GFP_KERNEL);
		}
		e->mode = 0555;
		e->child = sd_alloc_ctl_cpu_table(i);

		__cpumask_clear_cpu(i, sd_sysctl_cpus);
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)schedstat_val(F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);

	return group_path;
}
#endif
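
/*
 * group_path is a single shared buffer; its users run under
 * sched_debug_lock (taken in print_cpu()), which keeps concurrent
 * /proc/sched_debug readers from overwriting each other's paths.
 */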

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}
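
/*
 * MIN_vruntime/max_vruntime below are the vruntimes of the leftmost
 * and rightmost queued entities, sampled under the rq lock; "spread"
 * is their distance, and "spread0" the offset of this runqueue's
 * min_vruntime from CPU 0's.
 */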
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};
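
/*
 * Worked example: with CPUs {0, 2, 3} online, successive positions
 * yield the header, CPU 0, CPU 2 (offline CPU 1 is skipped by
 * cpumask_next()), CPU 3, then EOF. sched_debug_show() recovers the
 * CPU number as (unsigned long)(v - 2), with -1 meaning the header.
 */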

static int __init init_sched_debug_procfs(void)
{
	if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)
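
/*
 * __P()/__PS() print a bare expression or named value; P()/PN() go
 * through the task being dumped (p->F); the *N variants print
 * nanosecond values as ms.us via SPLIT_NS().
 */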

#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif

static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	P(se.avg.util_est.enqueued);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}