/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)                     \
 do {                                           \
        if (m)                                  \
                seq_printf(m, x);               \
        else                                    \
                pr_cont(x);                     \
 } while (0)

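/*
 * Usage sketch (editor's illustration, not part of the original file):
 * passing a NULL seq_file routes output through pr_cont() to the
 * console, which is how sysrq_sched_debug_show() below reuses the
 * /proc/sched_debug printers:
 *
 *      SEQ_printf(m, "cpu#%d\n", cpu);    // m != NULL: into the seq_file
 *      SEQ_printf(NULL, "cpu#%d\n", cpu); // m == NULL: to the console
 */
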
/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
        if ((long long)nsec < 0) {
                nsec = -nsec;
                do_div(nsec, 1000000);
                return -nsec;
        }
        do_div(nsec, 1000000);

        return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
        if ((long long)nsec < 0)
                nsec = -nsec;

        return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

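/*
 * Worked example (editor's illustration): SPLIT_NS() expands to two
 * printf arguments, so it pairs with a two-field format such as
 * "%Ld.%06ld". For 1234567890ns, nsec_high() yields 1234 and
 * nsec_low() yields 567890, printing "1234.567890" (msec.usec):
 *
 *      SEQ_printf(m, "%Ld.%06ld\n", SPLIT_NS(1234567890LL));
 */
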
#define SCHED_FEAT(name, enabled)       \
        #name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
        int i;

        for (i = 0; i < __SCHED_FEAT_NR; i++) {
                if (!(sysctl_sched_features & (1UL << i)))
                        seq_puts(m, "NO_");
                seq_printf(m, "%s ", sched_feat_names[i]);
        }
        seq_puts(m, "\n");

        return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)       \
        jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
        static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
        static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
        int i;
        int neg = 0;

        if (strncmp(cmp, "NO_", 3) == 0) {
                neg = 1;
                cmp += 3;
        }

        i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
        if (i < 0)
                return i;

        if (neg) {
                sysctl_sched_features &= ~(1UL << i);
                sched_feat_disable(i);
        } else {
                sysctl_sched_features |= (1UL << i);
                sched_feat_enable(i);
        }

        return 0;
}

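/*
 * Usage sketch (editor's illustration, not from the original file): a
 * feature name sets the bit, the same name with a "NO_" prefix clears
 * it, via the debugfs file created in sched_init_debug() below
 * (assuming the standard debugfs mount point):
 *
 *      echo GENTLE_FAIR_SLEEPERS    > /sys/kernel/debug/sched_features
 *      echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 */
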
static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
{
        char buf[64];
        char *cmp;
        int ret;
        struct inode *inode;

        if (cnt > 63)
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;
        cmp = strstrip(buf);

        /* Ensure the static_key remains in a consistent state */
        inode = file_inode(filp);
        cpus_read_lock();
        inode_lock(inode);
        ret = sched_feat_set(cmp);
        inode_unlock(inode);
        cpus_read_unlock();
        if (ret < 0)
                return ret;

        *ppos += cnt;

        return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
        .open           = sched_feat_open,
        .write          = sched_feat_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

__read_mostly bool sched_debug_enabled;

static __init int sched_init_debug(void)
{
        debugfs_create_file("sched_features", 0644, NULL, NULL,
                        &sched_feat_fops);

        debugfs_create_bool("sched_debug", 0644, NULL,
                        &sched_debug_enabled);

        return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP
#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
        {
                .procname       = "sched_domain",
                .mode           = 0555,
        },
        {}
};

static struct ctl_table sd_ctl_root[] = {
        {
                .procname       = "kernel",
                .mode           = 0555,
                .child          = sd_ctl_dir,
        },
        {}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
        struct ctl_table *entry =
                kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

        return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
        struct ctl_table *entry;

        /*
         * In the intermediate directories, both the child directory and
         * procname are dynamically allocated and could fail but the mode
         * will always be set. In the lowest directory the names are
         * static strings and all have proc handlers.
         */
        for (entry = *tablep; entry->mode; entry++) {
                if (entry->child)
                        sd_free_ctl_entry(&entry->child);
                if (entry->proc_handler == NULL)
                        kfree(entry->procname);
        }

        kfree(*tablep);
        *tablep = NULL;
}

static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;

static void
set_table_entry(struct ctl_table *entry,
                const char *procname, void *data, int maxlen,
                umode_t mode, proc_handler *proc_handler,
                bool load_idx)
{
        entry->procname = procname;
        entry->data = data;
        entry->maxlen = maxlen;
        entry->mode = mode;
        entry->proc_handler = proc_handler;

        if (load_idx) {
                entry->extra1 = &min_load_idx;
                entry->extra2 = &max_load_idx;
        }
}

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
        struct ctl_table *table = sd_alloc_ctl_entry(14);

        if (table == NULL)
                return NULL;

        set_table_entry(&table[0] , "min_interval",        &sd->min_interval,        sizeof(long), 0644, proc_doulongvec_minmax, false);
        set_table_entry(&table[1] , "max_interval",        &sd->max_interval,        sizeof(long), 0644, proc_doulongvec_minmax, false);
        set_table_entry(&table[2] , "busy_idx",            &sd->busy_idx,            sizeof(int) , 0644, proc_dointvec_minmax,   true );
        set_table_entry(&table[3] , "idle_idx",            &sd->idle_idx,            sizeof(int) , 0644, proc_dointvec_minmax,   true );
        set_table_entry(&table[4] , "newidle_idx",         &sd->newidle_idx,         sizeof(int) , 0644, proc_dointvec_minmax,   true );
        set_table_entry(&table[5] , "wake_idx",            &sd->wake_idx,            sizeof(int) , 0644, proc_dointvec_minmax,   true );
        set_table_entry(&table[6] , "forkexec_idx",        &sd->forkexec_idx,        sizeof(int) , 0644, proc_dointvec_minmax,   true );
        set_table_entry(&table[7] , "busy_factor",         &sd->busy_factor,         sizeof(int) , 0644, proc_dointvec_minmax,   false);
        set_table_entry(&table[8] , "imbalance_pct",       &sd->imbalance_pct,       sizeof(int) , 0644, proc_dointvec_minmax,   false);
        set_table_entry(&table[9] , "cache_nice_tries",    &sd->cache_nice_tries,    sizeof(int) , 0644, proc_dointvec_minmax,   false);
        set_table_entry(&table[10], "flags",               &sd->flags,               sizeof(int) , 0644, proc_dointvec_minmax,   false);
        set_table_entry(&table[11], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax, false);
        set_table_entry(&table[12], "name",                sd->name,            CORENAME_MAX_SIZE, 0444, proc_dostring,          false);
        /* &table[13] is terminator */

        return table;
}

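/*
 * Illustrative result (editor's note, assuming a standard procfs mount):
 * each populated entry surfaces as a file in the per-CPU, per-domain
 * sysctl tree built by sd_alloc_ctl_cpu_table() below, e.g.:
 *
 *      /proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *      /proc/sys/kernel/sched_domain/cpu0/domain0/imbalance_pct
 */
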
static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
        struct ctl_table *entry, *table;
        struct sched_domain *sd;
        int domain_num = 0, i;
        char buf[32];

        for_each_domain(cpu, sd)
                domain_num++;
        entry = table = sd_alloc_ctl_entry(domain_num + 1);
        if (table == NULL)
                return NULL;

        i = 0;
        for_each_domain(cpu, sd) {
                snprintf(buf, 32, "domain%d", i);
                entry->procname = kstrdup(buf, GFP_KERNEL);
                entry->mode = 0555;
                entry->child = sd_alloc_ctl_domain_table(sd);
                entry++;
                i++;
        }
        return table;
}

static cpumask_var_t            sd_sysctl_cpus;
static struct ctl_table_header  *sd_sysctl_header;

void register_sched_domain_sysctl(void)
{
        static struct ctl_table *cpu_entries;
        static struct ctl_table **cpu_idx;
        static bool init_done = false;
        char buf[32];
        int i;

        if (!cpu_entries) {
                cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
                if (!cpu_entries)
                        return;

                WARN_ON(sd_ctl_dir[0].child);
                sd_ctl_dir[0].child = cpu_entries;
        }

        if (!cpu_idx) {
                struct ctl_table *e = cpu_entries;

                cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
                if (!cpu_idx)
                        return;

                /* deal with sparse possible map */
                for_each_possible_cpu(i) {
                        cpu_idx[i] = e;
                        e++;
                }
        }

        if (!cpumask_available(sd_sysctl_cpus)) {
                if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
                        return;
        }

        if (!init_done) {
                init_done = true;
                /* init to possible to not have holes in @cpu_entries */
                cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
        }

        for_each_cpu(i, sd_sysctl_cpus) {
                struct ctl_table *e = cpu_idx[i];

                if (e->child)
                        sd_free_ctl_entry(&e->child);

                if (!e->procname) {
                        snprintf(buf, 32, "cpu%d", i);
                        e->procname = kstrdup(buf, GFP_KERNEL);
                }
                e->mode = 0555;
                e->child = sd_alloc_ctl_cpu_table(i);

                __cpumask_clear_cpu(i, sd_sysctl_cpus);
        }

        WARN_ON(sd_sysctl_header);
        sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

void dirty_sched_domain_sysctl(int cpu)
{
        if (cpumask_available(sd_sysctl_cpus))
                __cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
        unregister_sysctl_table(sd_sysctl_header);
        sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
        struct sched_entity *se = tg->se[cpu];

#define P(F)            SEQ_printf(m, "  .%-30s: %lld\n",       #F, (long long)F)
#define P_SCHEDSTAT(F)  SEQ_printf(m, "  .%-30s: %lld\n",       #F, (long long)schedstat_val(F))
#define PN(F)           SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

        if (!se)
                return;

        PN(se->exec_start);
        PN(se->vruntime);
        PN(se->sum_exec_runtime);

        if (schedstat_enabled()) {
                PN_SCHEDSTAT(se->statistics.wait_start);
                PN_SCHEDSTAT(se->statistics.sleep_start);
                PN_SCHEDSTAT(se->statistics.block_start);
                PN_SCHEDSTAT(se->statistics.sleep_max);
                PN_SCHEDSTAT(se->statistics.block_max);
                PN_SCHEDSTAT(se->statistics.exec_max);
                PN_SCHEDSTAT(se->statistics.slice_max);
                PN_SCHEDSTAT(se->statistics.wait_max);
                PN_SCHEDSTAT(se->statistics.wait_sum);
                P_SCHEDSTAT(se->statistics.wait_count);
        }

        P(se->load.weight);
        P(se->runnable_weight);
#ifdef CONFIG_SMP
        P(se->avg.load_avg);
        P(se->avg.util_avg);
        P(se->avg.runnable_load_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
        if (autogroup_path(tg, group_path, PATH_MAX))
                return group_path;

        cgroup_path(tg->css.cgroup, group_path, PATH_MAX);

        return group_path;
}
#endif

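/*
 * Illustrative outputs (editor's note, hypothetical hierarchy): a task
 * in the cpu cgroup "foo" yields "/foo" from cgroup_path(); a task in
 * an autogroup yields a synthetic name such as "/autogroup-123" from
 * autogroup_path().
 */
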
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
        if (rq->curr == p)
                SEQ_printf(m, ">R");
        else
                SEQ_printf(m, " %c", task_state_to_char(p));

        SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
                p->comm, task_pid_nr(p),
                SPLIT_NS(p->se.vruntime),
                (long long)(p->nvcsw + p->nivcsw),
                p->prio);

        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
                SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
                SPLIT_NS(p->se.sum_exec_runtime),
                SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
        SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
        SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

        SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
        struct task_struct *g, *p;

        SEQ_printf(m, "\n");
        SEQ_printf(m, "runnable tasks:\n");
        SEQ_printf(m, " S           task   PID         tree-key  switches  prio"
                   "     wait-time             sum-exec        sum-sleep\n");
        SEQ_printf(m, "-------------------------------------------------------"
                   "----------------------------------------------------\n");

        rcu_read_lock();
        for_each_process_thread(g, p) {
                if (task_cpu(p) != rq_cpu)
                        continue;

                print_task(m, rq, p);
        }
        rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
        s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
                spread, rq0_min_vruntime, spread0;
        struct rq *rq = cpu_rq(cpu);
        struct sched_entity *last;
        unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "\n");
        SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
        SEQ_printf(m, "\n");
        SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
                        SPLIT_NS(cfs_rq->exec_clock));

        raw_spin_lock_irqsave(&rq->lock, flags);
        if (rb_first_cached(&cfs_rq->tasks_timeline))
                MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
        last = __pick_last_entity(cfs_rq);
        if (last)
                max_vruntime = last->vruntime;
        min_vruntime = cfs_rq->min_vruntime;
        rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
        raw_spin_unlock_irqrestore(&rq->lock, flags);
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
                        SPLIT_NS(MIN_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
                        SPLIT_NS(min_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
                        SPLIT_NS(max_vruntime));
        spread = max_vruntime - MIN_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
                        SPLIT_NS(spread));
        spread0 = min_vruntime - rq0_min_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
                        SPLIT_NS(spread0));
        SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
                        cfs_rq->nr_spread_over);
        SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
        SEQ_printf(m, "  .%-30s: %ld\n", "runnable_weight", cfs_rq->runnable_weight);
        SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
                        cfs_rq->avg.load_avg);
        SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
                        cfs_rq->avg.runnable_load_avg);
        SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
                        cfs_rq->avg.util_avg);
        SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
                        cfs_rq->avg.util_est.enqueued);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
                        cfs_rq->removed.load_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
                        cfs_rq->removed.util_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_sum",
                        cfs_rq->removed.runnable_sum);
#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
                        cfs_rq->tg_load_avg_contrib);
        SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
                        atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
        SEQ_printf(m, "  .%-30s: %d\n", "throttled",
                        cfs_rq->throttled);
        SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
                        cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
        print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
        SEQ_printf(m, "\n");
        SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
        SEQ_printf(m, "\n");
        SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
        SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
        SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

        PU(rt_nr_running);
#ifdef CONFIG_SMP
        PU(rt_nr_migratory);
#endif
        P(rt_throttled);
        PN(rt_time);
        PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
        struct dl_bw *dl_bw;

        SEQ_printf(m, "\n");
        SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
        SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

        PU(dl_nr_running);
#ifdef CONFIG_SMP
        PU(dl_nr_migratory);
        dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
        dl_bw = &dl_rq->dl_bw;
#endif
        SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
        SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

#ifdef CONFIG_X86
        {
                unsigned int freq = cpu_khz ? : 1;

                SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
                           cpu, freq / 1000, (freq % 1000));
        }
#else
        SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)                                                            \
do {                                                                    \
        if (sizeof(rq->x) == 4)                                         \
                SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));    \
        else                                                            \
                SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

        P(nr_running);
        SEQ_printf(m, "  .%-30s: %lu\n", "load",
                   rq->load.weight);
        P(nr_switches);
        P(nr_load_updates);
        P(nr_uninterruptible);
        PN(next_balance);
        SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
        PN(clock);
        PN(clock_task);
        P(cpu_load[0]);
        P(cpu_load[1]);
        P(cpu_load[2]);
        P(cpu_load[3]);
        P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
        P64(avg_idle);
        P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
        if (schedstat_enabled()) {
                P(yld_count);
                P(sched_count);
                P(sched_goidle);
                P(ttwu_count);
                P(ttwu_local);
        }
#undef P

        spin_lock_irqsave(&sched_debug_lock, flags);
        print_cfs_stats(m, cpu);
        print_rt_stats(m, cpu);
        print_dl_stats(m, cpu);

        print_rq(m, rq, cpu);
        spin_unlock_irqrestore(&sched_debug_lock, flags);
        SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
        "none",
        "logarithmic",
        "linear"
};

static void sched_debug_header(struct seq_file *m)
{
        u64 ktime, sched_clk, cpu_clk;
        unsigned long flags;

        local_irq_save(flags);
        ktime = ktime_to_ns(ktime_get());
        sched_clk = sched_clock();
        cpu_clk = local_clock();
        local_irq_restore(flags);

        SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);

#define P(x) \
        SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(ktime);
        PN(sched_clk);
        PN(cpu_clk);
        P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
        P(sched_clock_stable());
#endif
#undef PN
#undef P

        SEQ_printf(m, "\n");
        SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
        SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(sysctl_sched_latency);
        PN(sysctl_sched_min_granularity);
        PN(sysctl_sched_wakeup_granularity);
        P(sysctl_sched_child_runs_first);
        P(sysctl_sched_features);
#undef PN
#undef P

        SEQ_printf(m, "  .%-40s: %d (%s)\n",
                "sysctl_sched_tunable_scaling",
                sysctl_sched_tunable_scaling,
                sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
        SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
        int cpu = (unsigned long)(v - 2);

        if (cpu != -1)
                print_cpu(m, cpu);
        else
                sched_debug_header(m);

        return 0;
}

void sysrq_sched_debug_show(void)
{
        int cpu;

        sched_debug_header(NULL);
        for_each_online_cpu(cpu)
                print_cpu(NULL, cpu);
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
        unsigned long n = *offset;

        if (n == 0)
                return (void *) 1;

        n--;

        if (n > 0)
                n = cpumask_next(n - 1, cpu_online_mask);
        else
                n = cpumask_first(cpu_online_mask);

        *offset = n + 1;

        if (n < nr_cpu_ids)
                return (void *)(unsigned long)(n + 2);

        return NULL;
}

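/*
 * Worked mapping (editor's illustration, assuming CPUs 0-3 online):
 * *offset == 0 returns the header token (1); *offset == 1 returns
 * CPU 0 as token 2; offline CPUs are skipped via cpumask_next(), so
 * holes in cpu_online_mask never produce empty records.
 */
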
static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
        (*offset)++;
        return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
        .start          = sched_debug_start,
        .next           = sched_debug_next,
        .stop           = sched_debug_stop,
        .show           = sched_debug_show,
};

static int __init init_sched_debug_procfs(void)
{
        if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
                return -ENOMEM;
        return 0;
}

__initcall(init_sched_debug_procfs);

#define __P(F) SEQ_printf(m, "%-45s:%21Ld\n",       #F, (long long)F)
#define   P(F) SEQ_printf(m, "%-45s:%21Ld\n",       #F, (long long)p->F)
#define __PN(F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define   PN(F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
                unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
        SEQ_printf(m, "numa_faults node=%d ", node);
        SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
        SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif

static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
        struct mempolicy *pol;

        if (p->mm)
                P(mm->numa_scan_seq);

        task_lock(p);
        pol = p->mempolicy;
        if (pol && !(pol->flags & MPOL_F_MORON))
                pol = NULL;
        mpol_get(pol);
        task_unlock(p);

        P(numa_pages_migrated);
        P(numa_preferred_nid);
        P(total_numa_faults);
        SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
                        task_node(p), task_numa_group_id(p));
        show_numa_stats(p, m);
        mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
                                                  struct seq_file *m)
{
        unsigned long nr_switches;

        SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
                                                get_nr_threads(p));
        SEQ_printf(m,
                "---------------------------------------------------------"
                "----------\n");
#define __P(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define P_SCHEDSTAT(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
#define __PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#define PN_SCHEDSTAT(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))

        PN(se.exec_start);
        PN(se.vruntime);
        PN(se.sum_exec_runtime);

        nr_switches = p->nvcsw + p->nivcsw;

        P(se.nr_migrations);

        if (schedstat_enabled()) {
                u64 avg_atom, avg_per_cpu;

                PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
                PN_SCHEDSTAT(se.statistics.wait_start);
                PN_SCHEDSTAT(se.statistics.sleep_start);
                PN_SCHEDSTAT(se.statistics.block_start);
                PN_SCHEDSTAT(se.statistics.sleep_max);
                PN_SCHEDSTAT(se.statistics.block_max);
                PN_SCHEDSTAT(se.statistics.exec_max);
                PN_SCHEDSTAT(se.statistics.slice_max);
                PN_SCHEDSTAT(se.statistics.wait_max);
                PN_SCHEDSTAT(se.statistics.wait_sum);
                P_SCHEDSTAT(se.statistics.wait_count);
                PN_SCHEDSTAT(se.statistics.iowait_sum);
                P_SCHEDSTAT(se.statistics.iowait_count);
                P_SCHEDSTAT(se.statistics.nr_migrations_cold);
                P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
                P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
                P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
                P_SCHEDSTAT(se.statistics.nr_forced_migrations);
                P_SCHEDSTAT(se.statistics.nr_wakeups);
                P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
                P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
                P_SCHEDSTAT(se.statistics.nr_wakeups_local);
                P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
                P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
                P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
                P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
                P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
                        avg_atom = div64_ul(avg_atom, nr_switches);
                else
                        avg_atom = -1LL;

                avg_per_cpu = p->se.sum_exec_runtime;
                if (p->se.nr_migrations) {
                        avg_per_cpu = div64_u64(avg_per_cpu,
                                                p->se.nr_migrations);
                } else {
                        avg_per_cpu = -1LL;
                }

                __PN(avg_atom);
                __PN(avg_per_cpu);
        }

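        /*
         * Worked example (editor's illustration, made-up numbers): with
         * sum_exec_runtime = 10,000,000ns, 100 switches and 4 migrations,
         * avg_atom is 100,000ns per scheduling atom and avg_per_cpu is
         * 2,500,000ns per uninterrupted stretch on one CPU; both report
         * -1 when their divisor is zero.
         */
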
        __P(nr_switches);
        SEQ_printf(m, "%-45s:%21Ld\n",
                   "nr_voluntary_switches", (long long)p->nvcsw);
        SEQ_printf(m, "%-45s:%21Ld\n",
                   "nr_involuntary_switches", (long long)p->nivcsw);

        P(se.load.weight);
        P(se.runnable_weight);
#ifdef CONFIG_SMP
        P(se.avg.load_sum);
        P(se.avg.runnable_load_sum);
        P(se.avg.util_sum);
        P(se.avg.load_avg);
        P(se.avg.runnable_load_avg);
        P(se.avg.util_avg);
        P(se.avg.last_update_time);
        P(se.avg.util_est.ewma);
        P(se.avg.util_est.enqueued);
#endif
        P(policy);
        P(prio);

        if (task_has_dl_policy(p)) {
                P(dl.runtime);
                P(dl.deadline);
        }
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
#undef P_SCHEDSTAT
#undef P
#undef __P

        {
                unsigned int this_cpu = raw_smp_processor_id();
                u64 t0, t1;

                t0 = cpu_clock(this_cpu);
                t1 = cpu_clock(this_cpu);
                SEQ_printf(m, "%-45s:%21Ld\n",
                           "clock-delta", (long long)(t1-t0));
        }

        sched_show_numa(p, m);
}

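/*
 * Usage sketch (editor's note): this function backs /proc/<pid>/sched,
 * so "cat /proc/1/sched" dumps the fields above for PID 1; writing to
 * that file invokes proc_sched_set_task() below, resetting the
 * schedstat counters.
 */
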
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}