/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...) \
do { \
        if (m) \
                seq_printf(m, x); \
        else \
                printk(x); \
} while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
        if ((long long)nsec < 0) {
                nsec = -nsec;
                do_div(nsec, 1000000);
                return -nsec;
        }
        do_div(nsec, 1000000);

        return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
        if ((long long)nsec < 0)
                nsec = -nsec;

        return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

#ifdef CONFIG_FAIR_GROUP_SCHED
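/*
 * Print the per-CPU sched_entity statistics for a task group; when the
 * group has no entity on this CPU, fall back to the runqueue's sched_avg.
 */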
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
        struct sched_entity *se = tg->se[cpu];

#define P(F) \
        SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
        SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

        if (!se) {
                struct sched_avg *avg = &cpu_rq(cpu)->avg;
                P(avg->runnable_avg_sum);
                P(avg->runnable_avg_period);
                return;
        }


        PN(se->exec_start);
        PN(se->vruntime);
        PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
        PN(se->statistics.wait_start);
        PN(se->statistics.sleep_start);
        PN(se->statistics.block_start);
        PN(se->statistics.sleep_max);
        PN(se->statistics.block_max);
        PN(se->statistics.exec_max);
        PN(se->statistics.slice_max);
        PN(se->statistics.wait_max);
        PN(se->statistics.wait_sum);
        P(se->statistics.wait_count);
#endif
        P(se->load.weight);
#ifdef CONFIG_SMP
        P(se->avg.runnable_avg_sum);
        P(se->avg.runnable_avg_period);
        P(se->avg.load_avg_contrib);
        P(se->avg.decay_count);
#endif
#undef PN
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

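/*
 * Resolve a task group's cgroup path into the shared group_path buffer;
 * autogroups get their synthetic name from autogroup_path() instead.
 */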
static char *task_group_path(struct task_group *tg)
{
        if (autogroup_path(tg, group_path, PATH_MAX))
                return group_path;

        cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
        return group_path;
}
#endif

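/*
 * Print one row of the runnable-tasks table: run state, comm, pid,
 * vruntime, context-switch count and priority, plus the schedstats,
 * NUMA node and cgroup columns when those options are enabled.
 */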
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
        if (rq->curr == p)
                SEQ_printf(m, "R");
        else
                SEQ_printf(m, " ");

        SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
                p->comm, task_pid_nr(p),
                SPLIT_NS(p->se.vruntime),
                (long long)(p->nvcsw + p->nivcsw),
                p->prio);
#ifdef CONFIG_SCHEDSTATS
        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
                SPLIT_NS(p->se.vruntime),
                SPLIT_NS(p->se.sum_exec_runtime),
                SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
        SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
                0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
#ifdef CONFIG_NUMA_BALANCING
        SEQ_printf(m, " %d", task_node(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
        SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

        SEQ_printf(m, "\n");
}

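/*
 * Walk every thread in the system under tasklist_lock and print the ones
 * whose task_cpu() matches this runqueue's CPU.
 */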
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
        struct task_struct *g, *p;
        unsigned long flags;

        SEQ_printf(m,
        "\nrunnable tasks:\n"
        "            task   PID         tree-key  switches  prio"
        "     exec-runtime         sum-exec        sum-sleep\n"
        "------------------------------------------------------"
        "----------------------------------------------------\n");

        read_lock_irqsave(&tasklist_lock, flags);

        do_each_thread(g, p) {
                if (task_cpu(p) != rq_cpu)
                        continue;

                print_task(m, rq, p);
        } while_each_thread(g, p);

        read_unlock_irqrestore(&tasklist_lock, flags);
}

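/*
 * Print the state of one CPU's cfs_rq: vruntime spread versus cpu 0, load
 * and runnable averages, bandwidth throttling state, and (with group
 * scheduling) the owning task group's statistics.
 */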
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
        s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
                spread, rq0_min_vruntime, spread0;
        struct rq *rq = cpu_rq(cpu);
        struct sched_entity *last;
        unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
        SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
                        SPLIT_NS(cfs_rq->exec_clock));

        raw_spin_lock_irqsave(&rq->lock, flags);
        if (cfs_rq->rb_leftmost)
                MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
        last = __pick_last_entity(cfs_rq);
        if (last)
                max_vruntime = last->vruntime;
        min_vruntime = cfs_rq->min_vruntime;
        rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
        raw_spin_unlock_irqrestore(&rq->lock, flags);
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
                        SPLIT_NS(MIN_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
                        SPLIT_NS(min_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
                        SPLIT_NS(max_vruntime));
        spread = max_vruntime - MIN_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
                        SPLIT_NS(spread));
        spread0 = min_vruntime - rq0_min_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
                        SPLIT_NS(spread0));
        SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
                        cfs_rq->nr_spread_over);
        SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
        SEQ_printf(m, "  .%-30s: %ld\n", "runnable_load_avg",
                        cfs_rq->runnable_load_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
                        cfs_rq->blocked_load_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_contrib",
                        cfs_rq->tg_load_contrib);
        SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
                        cfs_rq->tg_runnable_contrib);
        SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
                        atomic_long_read(&cfs_rq->tg->load_avg));
        SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
                        atomic_read(&cfs_rq->tg->runnable_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
        SEQ_printf(m, "  .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
                        cfs_rq->tg->cfs_bandwidth.timer_active);
        SEQ_printf(m, "  .%-30s: %d\n", "throttled",
                        cfs_rq->throttled);
        SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
                        cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
        print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

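/*
 * Print the state of one CPU's rt_rq: running count, throttling state and
 * the accumulated/allotted runtime.
 */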
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
        SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
        SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
        SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

        P(rt_nr_running);
        P(rt_throttled);
        PN(rt_time);
        PN(rt_runtime);

#undef PN
#undef P
}

extern __read_mostly int sched_clock_running;

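/*
 * Dump one CPU's runqueue: clock and load fields, schedstats counters,
 * then the per-class cfs/rt stats and the runnable-tasks list.
 */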
static void print_cpu(struct seq_file *m, int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

#ifdef CONFIG_X86
        {
                unsigned int freq = cpu_khz ? : 1;

                SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
                           cpu, freq / 1000, (freq % 1000));
        }
#else
        SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x) \
do { \
        if (sizeof(rq->x) == 4) \
                SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x)); \
        else \
                SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x)); \
} while (0)

#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

        P(nr_running);
        SEQ_printf(m, "  .%-30s: %lu\n", "load",
                   rq->load.weight);
        P(nr_switches);
        P(nr_load_updates);
        P(nr_uninterruptible);
        PN(next_balance);
        SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
        PN(clock);
        P(cpu_load[0]);
        P(cpu_load[1]);
        P(cpu_load[2]);
        P(cpu_load[3]);
        P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);

        P(yld_count);

        P(sched_count);
        P(sched_goidle);
#ifdef CONFIG_SMP
        P64(avg_idle);
        P64(max_idle_balance_cost);
#endif

        P(ttwu_count);
        P(ttwu_local);

#undef P
#undef P64
#endif
        spin_lock_irqsave(&sched_debug_lock, flags);
        print_cfs_stats(m, cpu);
        print_rt_stats(m, cpu);

        rcu_read_lock();
        print_rq(m, rq, cpu);
        rcu_read_unlock();
        spin_unlock_irqrestore(&sched_debug_lock, flags);
        SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
        "none",
        "logarithmic",
        "linear"
};

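/*
 * Print the global header: version/uname line, the various clocks and
 * jiffies, and the sysctl_sched_* tunables.
 */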
static void sched_debug_header(struct seq_file *m)
{
        u64 ktime, sched_clk, cpu_clk;
        unsigned long flags;

        local_irq_save(flags);
        ktime = ktime_to_ns(ktime_get());
        sched_clk = sched_clock();
        cpu_clk = local_clock();
        local_irq_restore(flags);

        SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);

#define P(x) \
        SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(ktime);
        PN(sched_clk);
        PN(cpu_clk);
        P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
        P(sched_clock_stable());
#endif
#undef PN
#undef P

        SEQ_printf(m, "\n");
        SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
        SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(sysctl_sched_latency);
        PN(sysctl_sched_min_granularity);
        PN(sysctl_sched_wakeup_granularity);
        P(sysctl_sched_child_runs_first);
        P(sysctl_sched_features);
#undef PN
#undef P

        SEQ_printf(m, "  .%-40s: %d (%s)\n",
                "sysctl_sched_tunable_scaling",
                sysctl_sched_tunable_scaling,
                sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
        SEQ_printf(m, "\n");
}

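/*
 * seq_file ->show(): the iterator hands us (void *)1 for the header
 * position and (void *)(cpu + 2) for each online CPU.
 */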
static int sched_debug_show(struct seq_file *m, void *v)
{
        int cpu = (unsigned long)(v - 2);

        if (cpu != -1)
                print_cpu(m, cpu);
        else
                sched_debug_header(m);

        return 0;
}

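/*
 * Dump the whole report to the console: with a NULL seq_file,
 * SEQ_printf() falls back to printk().
 */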
void sysrq_sched_debug_show(void)
{
        int cpu;

        sched_debug_header(NULL);
        for_each_online_cpu(cpu)
                print_cpu(NULL, cpu);

}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
        unsigned long n = *offset;

        if (n == 0)
                return (void *) 1;

        n--;

        if (n > 0)
                n = cpumask_next(n - 1, cpu_online_mask);
        else
                n = cpumask_first(cpu_online_mask);

        *offset = n + 1;

        if (n < nr_cpu_ids)
                return (void *)(unsigned long)(n + 2);
        return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
        (*offset)++;
        return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
        .start = sched_debug_start,
        .next = sched_debug_next,
        .stop = sched_debug_stop,
        .show = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
        seq_release(inode, file);

        return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
        int ret = 0;

        ret = seq_open(filp, &sched_debug_sops);

        return ret;
}

static const struct file_operations sched_debug_fops = {
        .open = sched_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = sched_debug_release,
};

static int __init init_sched_debug_procfs(void)
{
        struct proc_dir_entry *pe;

        pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
        if (!pe)
                return -ENOMEM;
        return 0;
}

__initcall(init_sched_debug_procfs);

#define __P(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

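/*
 * With CONFIG_NUMA_BALANCING, print the task's NUMA scan sequence, the
 * number of pages migrated since the last read, and the per-node fault
 * statistics.
 */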
static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
        struct mempolicy *pol;
        int node, i;

        if (p->mm)
                P(mm->numa_scan_seq);

        task_lock(p);
        pol = p->mempolicy;
        if (pol && !(pol->flags & MPOL_F_MORON))
                pol = NULL;
        mpol_get(pol);
        task_unlock(p);

        SEQ_printf(m, "numa_migrations, %ld\n", xchg(&p->numa_pages_migrated, 0));

        for_each_online_node(node) {
                for (i = 0; i < 2; i++) {
                        unsigned long nr_faults = -1;
                        int cpu_current, home_node;

                        if (p->numa_faults_memory)
                                nr_faults = p->numa_faults_memory[2*node + i];

                        cpu_current = !i ? (task_node(p) == node) :
                                (pol && node_isset(node, pol->v.nodes));

                        home_node = (p->numa_preferred_nid == node);

                        SEQ_printf(m, "numa_faults_memory, %d, %d, %d, %d, %ld\n",
                                i, node, cpu_current, home_node, nr_faults);
                }
        }

        mpol_put(pol);
#endif
}

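/*
 * Back end of /proc/<pid>/sched: print the per-task scheduling state and,
 * when CONFIG_SCHEDSTATS is set, the full set of schedstats counters.
 */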
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
        unsigned long nr_switches;

        SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
                                                get_nr_threads(p));
        SEQ_printf(m,
                "---------------------------------------------------------"
                "----------\n");
#define __P(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

        PN(se.exec_start);
        PN(se.vruntime);
        PN(se.sum_exec_runtime);

        nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
        PN(se.statistics.wait_start);
        PN(se.statistics.sleep_start);
        PN(se.statistics.block_start);
        PN(se.statistics.sleep_max);
        PN(se.statistics.block_max);
        PN(se.statistics.exec_max);
        PN(se.statistics.slice_max);
        PN(se.statistics.wait_max);
        PN(se.statistics.wait_sum);
        P(se.statistics.wait_count);
        PN(se.statistics.iowait_sum);
        P(se.statistics.iowait_count);
        P(se.nr_migrations);
        P(se.statistics.nr_migrations_cold);
        P(se.statistics.nr_failed_migrations_affine);
        P(se.statistics.nr_failed_migrations_running);
        P(se.statistics.nr_failed_migrations_hot);
        P(se.statistics.nr_forced_migrations);
        P(se.statistics.nr_wakeups);
        P(se.statistics.nr_wakeups_sync);
        P(se.statistics.nr_wakeups_migrate);
        P(se.statistics.nr_wakeups_local);
        P(se.statistics.nr_wakeups_remote);
        P(se.statistics.nr_wakeups_affine);
        P(se.statistics.nr_wakeups_affine_attempts);
        P(se.statistics.nr_wakeups_passive);
        P(se.statistics.nr_wakeups_idle);

        {
                u64 avg_atom, avg_per_cpu;

                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
                        do_div(avg_atom, nr_switches);
                else
                        avg_atom = -1LL;

                avg_per_cpu = p->se.sum_exec_runtime;
                if (p->se.nr_migrations) {
                        avg_per_cpu = div64_u64(avg_per_cpu,
                                                p->se.nr_migrations);
                } else {
                        avg_per_cpu = -1LL;
                }

                __PN(avg_atom);
                __PN(avg_per_cpu);
        }
#endif
        __P(nr_switches);
        SEQ_printf(m, "%-45s:%21Ld\n",
                   "nr_voluntary_switches", (long long)p->nvcsw);
        SEQ_printf(m, "%-45s:%21Ld\n",
                   "nr_involuntary_switches", (long long)p->nivcsw);

        P(se.load.weight);
#ifdef CONFIG_SMP
        P(se.avg.runnable_avg_sum);
        P(se.avg.runnable_avg_period);
        P(se.avg.load_avg_contrib);
        P(se.avg.decay_count);
#endif
        P(policy);
        P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

        {
                unsigned int this_cpu = raw_smp_processor_id();
                u64 t0, t1;

                t0 = cpu_clock(this_cpu);
                t1 = cpu_clock(this_cpu);
                SEQ_printf(m, "%-45s:%21Ld\n",
                           "clock-delta", (long long)(t1-t0));
        }

        sched_show_numa(p, m);
}

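/*
 * Clear the task's accumulated schedstats.
 */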
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}