/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>
#include <linux/debugfs.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
do {						\
	if (m)					\
		seq_printf(m, x);	 	\
	else					\
		printk(x);			\
} while (0)
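
/*
 * Usage example (illustrative, not part of the original source): one
 * call site serves both sinks. A caller such as print_cpu() does
 *
 *	SEQ_printf(m, "cpu#%d\n", cpu);
 *
 * which goes through seq_printf() for /proc/sched_debug readers, and
 * through printk() when sysrq_sched_debug_show() passes m == NULL.
 */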

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
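
/*
 * Worked example (illustrative): SPLIT_NS(1234567890) evaluates to the
 * pair (1234, 567890), since 1234567890 ns / 10^6 = 1234 remainder
 * 567890. A "%Ld.%06ld" format then prints "1234.567890", i.e.
 * nanoseconds rendered as milliseconds with six fractional digits;
 * nsec_high()/nsec_low() keep the sign consistent for negative values.
 */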

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
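
/*
 * Expansion sketch (illustrative): features.h is a list of
 * SCHED_FEAT(name, enabled) lines, so with the definition above an
 * entry such as SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) contributes the
 * string "GENTLE_FAIR_SLEEPERS" to sched_feat_names[]. The same header
 * is re-included below under a different SCHED_FEAT definition to
 * build the parallel sched_feat_keys[] array, keeping the two arrays
 * in sync by construction.
 */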

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */

/*
 * Parse one feature name, optionally prefixed with "NO_", and flip the
 * corresponding bit and static key. Returns the index of the matched
 * feature, or __SCHED_FEAT_NR if the name was not recognized.
 */
static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int i;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	inode_lock(inode);
	i = sched_feat_set(cmp);
	inode_unlock(inode);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);
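
/*
 * Usage sketch (illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug and that features.h defines GENTLE_FAIR_SLEEPERS):
 *
 *	# cat /sys/kernel/debug/sched_features
 *	GENTLE_FAIR_SLEEPERS START_DEBIT ...
 *	# echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 *
 * The write handler strips the "NO_" prefix in sched_feat_set() and
 * clears the feature bit together with its static key.
 */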

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler,
		bool load_idx)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;

	if (load_idx) {
		entry->extra1 = &min_load_idx;
		entry->extra2 = &max_load_idx;
	}
}

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(14);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[1], "max_interval", &sd->max_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[9], "cache_nice_tries",
		&sd->cache_nice_tries,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[10], "flags", &sd->flags,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[11], "max_newidle_lb_cost",
		&sd->max_newidle_lb_cost,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[12], "name", sd->name,
		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
	/* &table[13] is terminator */

	return table;
}
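
/*
 * Resulting layout (illustrative): once registered below, the tables
 * built here appear under /proc/sys, e.g. on a machine with two domain
 * levels:
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain1/imbalance_pct
 *
 * Note that kcalloc() in sd_alloc_ctl_entry() zeroes all 14 slots, so
 * the unused 14th entry is the sentinel that terminates the table.
 */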

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}

static struct ctl_table_header *sd_sysctl_header;
void register_sched_domain_sysctl(void)
{
	int i, cpu_num = num_possible_cpus();
	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
	char buf[32];

	WARN_ON(sd_ctl_dir[0].child);
	sd_ctl_dir[0].child = entry;

	if (entry == NULL)
		return;

	for_each_possible_cpu(i) {
		snprintf(buf, 32, "cpu%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_cpu_table(i);
		entry++;
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
	if (sd_ctl_dir[0].child)
		sd_free_ctl_entry(&sd_ctl_dir[0].child);
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
	return group_path;
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     wait-time             sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed_load_avg",
			atomic_long_read(&cfs_rq->removed_load_avg));
	SEQ_printf(m, "  .%-30s: %ld\n", "removed_util_avg",
			atomic_long_read(&cfs_rq->removed_util_avg));
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}
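
/*
 * Reading note (not in the original source): "spread" above is the
 * vruntime gap between the leftmost and rightmost entities queued on
 * this CPU, while "spread0" compares this runqueue's min_vruntime with
 * CPU 0's, giving a rough indication of cross-CPU vruntime skew.
 */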

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);
	SEQ_printf(m, "  .%-30s: %ld\n", "dl_nr_running", dl_rq->dl_nr_running);
#ifdef CONFIG_SMP
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
}

extern __read_mostly int sched_clock_running;

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}
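
/*
 * Shape of the per-CPU section (illustrative values, derived from the
 * format strings above):
 *
 *	cpu#0, 2400.000 MHz
 *	  .nr_running                    : 1
 *	  .load                          : 1024
 *	  .nr_switches                   : 103456
 *	  ...
 *
 * followed by the cfs_rq, rt_rq and dl_rq sections and the runnable
 * task table printed by print_rq().
 */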

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}
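
/*
 * Worked example (illustrative): with CPUs {0, 2} online,
 *
 *	start(*offset == 0) returns (void *)1, the header position;
 *	start(*offset == 1) picks cpumask_first() = 0 and returns
 *		(void *)2, i.e. cpu 0;
 *	next() bumps *offset to 2; start() takes cpumask_next(0) = 2,
 *		returns (void *)4 for cpu 2 and sets *offset = 3;
 *	next() bumps *offset to 4; cpumask_next(2) finds no further CPU,
 *		so start() returns NULL and iteration ends.
 *
 * sched_debug_show() recovers the cpu number as (v - 2), with the
 * header encoded as cpu == -1.
 */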

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start = sched_debug_start,
	.next = sched_debug_next,
	.stop = sched_debug_stop,
	.show = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);

	return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	ret = seq_open(filp, &sched_debug_sops);

	return ret;
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= sched_debug_release,
};

static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
#endif
	P(policy);
	P(prio);
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
#undef P_SCHEDSTAT
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}
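
/*
 * Note (not in the original source): proc_sched_show_task() backs reads
 * of /proc/<pid>/sched; the per-task proc code hands in the seq_file,
 * so everything printed above is what that file shows.
 */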

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}
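
/*
 * Note (not in the original source): this hook is invoked from the
 * /proc/<pid>/sched write handler, so e.g.
 *
 *	# echo > /proc/<pid>/sched
 *
 * resets the task's accumulated schedstats by zeroing p->se.statistics.
 */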