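/*
 * builtin-sched.c: scheduler measurement and replay ('perf sched').
 *
 * Implements the 'record', 'latency', 'replay' and 'trace' subcommands
 * on top of the scheduler tracepoints captured in perf.data.
 */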
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <sys/types.h>
#include <sys/prctl.h>

#include <semaphore.h>
#include <pthread.h>
#include <math.h>

static char const *input_name = "perf.data";
static int input;
static unsigned long page_size;
static unsigned long mmap_window = 32;

static unsigned long total_comm = 0;

static struct rb_root threads;
static struct thread *last_match;

static struct perf_header *header;
static u64 sample_type;

static char default_sort_order[] = "avg, max, switch, runtime";
static char *sort_order = default_sort_order;

#define PR_SET_NAME	15	/* Set process name */
#define MAX_CPUS	4096

#define BUG_ON(x)	assert(!(x))

static u64 run_measurement_overhead;
static u64 sleep_measurement_overhead;

#define COMM_LEN	20
#define SYM_LEN		129

#define MAX_PID		65536

static unsigned long nr_tasks;

struct sched_atom;

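/*
 * Per-task replay state: the stream of sched atoms reconstructed from
 * the trace, plus the worker thread and semaphores used to act it out.
 */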
struct task_desc {
	unsigned long nr;
	unsigned long pid;
	char comm[COMM_LEN];

	unsigned long nr_events;
	unsigned long curr_event;
	struct sched_atom **atoms;

	pthread_t thread;
	sem_t sleep_sem;

	sem_t ready_for_work;
	sem_t work_done_sem;

	u64 cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
};

struct sched_atom {
	enum sched_event_type type;
	u64 timestamp;
	u64 duration;
	unsigned long nr;
	int specific_wait;
	sem_t *wait_sem;
	struct task_desc *wakee;
};

static struct task_desc *pid_to_task[MAX_PID];

static struct task_desc **tasks;

static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static u64 start_time;

static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long nr_run_events;
static unsigned long nr_sleep_events;
static unsigned long nr_wakeup_events;

static unsigned long nr_sleep_corrections;
static unsigned long nr_run_events_optimized;

static unsigned long targetless_wakeups;
static unsigned long multitarget_wakeups;

static u64 cpu_usage;
static u64 runavg_cpu_usage;
static u64 parent_cpu_usage;
static u64 runavg_parent_cpu_usage;

static unsigned long nr_runs;
static u64 sum_runtime;
static u64 sum_fluct;
static u64 run_avg;

static unsigned long replay_repeat = 10;
static unsigned long nr_timestamps;
static unsigned long nr_unordered_timestamps;
static unsigned long nr_state_machine_bugs;
static unsigned long nr_context_switch_bugs;
static unsigned long nr_events;
static unsigned long nr_lost_chunks;
static unsigned long nr_lost_events;

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

struct work_atom {
	struct list_head list;
	enum thread_state state;
	u64 sched_out_time;
	u64 wake_up_time;
	u64 sched_in_time;
	u64 runtime;
};

struct work_atoms {
	struct list_head work_list;
	struct thread *thread;
	struct rb_node node;
	u64 max_lat;
	u64 total_lat;
	u64 nb_atoms;
	u64 total_runtime;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

static struct rb_root atom_root, sorted_atom_root;

static u64 all_runtime;
static u64 all_count;

static int read_events(void);


static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

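/*
 * Burn CPU for 'nsecs', compensating for the measured cost of the
 * timestamp reads themselves (calibrated at startup).
 */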
static void burn_nsecs(u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 1000000000ULL;
	ts.tv_sec = nsecs / 1000000000ULL;

	nanosleep(&ts, NULL);
}

static void calibrate_run_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	run_measurement_overhead = min_delta;

	printf("run measurement overhead: %Ld nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
}

static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = calloc(1, sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

static void
add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * into it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	nr_run_events++;
}

static void
add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
		       struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = calloc(1, sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	nr_wakeup_events++;
}

static void
add_sched_event_sleep(struct task_desc *task, u64 timestamp,
		      u64 task_state __used)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	nr_sleep_events++;
}

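/*
 * Look up (or create) the replay task descriptor for a PID. Every new
 * task gets an initial sleep atom, which is ignored unless a wakeup
 * later points at it.
 */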
static struct task_desc *register_pid(unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];

	if (task)
		return task;

	task = calloc(1, sizeof(*task));
	task->pid = pid;
	task->nr = nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;
	nr_tasks++;
	tasks = realloc(tasks, nr_tasks*sizeof(struct task_desc *));
	BUG_ON(!tasks);
	tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

	return task;
}


static void print_task_traces(void)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(void)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < nr_tasks; i++) {
		task1 = tasks[i];
		j = i + 1;
		if (j == nr_tasks)
			j = 0;
		task2 = tasks[j];
		add_sched_event_wakeup(task1, 0, task2);
	}
}

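/*
 * Act out one recorded atom: RUN burns CPU for the recorded duration,
 * SLEEP blocks on the atom's semaphore, WAKEUP posts the wakee's.
 */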
static void
process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
{
	int ret = 0;
	u64 now;
	long long delta;

	now = get_nsecs();
	delta = start_time + atom->timestamp - now;

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(atom->duration);
		break;
	case SCHED_EVENT_SLEEP:
		if (atom->wait_sem)
			ret = sem_wait(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_WAKEUP:
		if (atom->wait_sem)
			ret = sem_post(atom->wait_sem);
		BUG_ON(ret);
		break;
	default:
		BUG_ON(1);
	}
}

static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}

static u64 get_cpu_usage_nsec_self(void)
{
	char filename[] = "/proc/1234567890/sched";
	unsigned long msecs, nsecs;
	char *line = NULL;
	u64 total = 0;
	size_t len = 0;
	ssize_t chars;
	FILE *file;
	int ret;

	sprintf(filename, "/proc/%d/sched", getpid());
	file = fopen(filename, "r");
	BUG_ON(!file);

	while ((chars = getline(&line, &len, file)) != -1) {
		ret = sscanf(line, "se.sum_exec_runtime : %ld.%06ld\n",
			&msecs, &nsecs);
		if (ret == 2) {
			total = msecs*1e6 + nsecs;
			break;
		}
	}
	if (line)
		free(line);
	fclose(file);

	return total;
}

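/*
 * Replay worker: signals ready_for_work, then uses the two mutexes as
 * start/stop barriers around each pass over the task's atoms, and
 * reports its own CPU usage measured via /proc/<pid>/sched.
 */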
static void *thread_func(void *ctx)
{
	struct task_desc *this_task = ctx;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);

again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self();

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self();
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;

	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}

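/*
 * Spawn one worker thread per recorded task; both barrier mutexes are
 * taken beforehand, so the new threads queue up at the start barrier.
 */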
static void create_tasks(void)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
	BUG_ON(err);
	err = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);
		BUG_ON(err);
	}
}

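/*
 * Run one replay iteration: wait until every worker is ready, open the
 * start barrier, collect per-task CPU usage, and keep decaying run
 * averages of child and parent CPU consumption.
 */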
static void wait_for_tasks(void)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();
	cpu_usage = 0;
	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
				   parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}

static void run_one_test(void)
{
	u64 T0, T1, delta, avg_delta, fluct, std_dev;

	T0 = get_nsecs();
	wait_for_tasks();
	T1 = get_nsecs();

	delta = T1 - T0;
	sum_runtime += delta;
	nr_runs++;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sum_fluct += fluct;
	std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
	if (!run_avg)
		run_avg = delta;
	run_avg = (run_avg*9 + delta)/10;

	printf("#%-3ld: %0.3f, ",
		nr_runs, (double)delta/1000000.0);

	printf("ravg: %0.2f, ",
		(double)run_avg/1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)parent_cpu_usage/1e6,
		(double)runavg_parent_cpu_usage/1e6);
#endif

	printf("\n");

	if (nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;
}

static void test_calibrations(void)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(1e6);
	T1 = get_nsecs();

	printf("the run test took %Ld nsecs\n", T1-T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();

	printf("the sleep test took %Ld nsecs\n", T1-T0);
}

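/*
 * 'perf sched replay': calibrate measurement overhead, rebuild the
 * task/event graph from the trace and re-enact it replay_repeat times.
 */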
static void __cmd_replay(void)
{
	unsigned long i;

	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	read_events();

	printf("nr_run_events: %ld\n", nr_run_events);
	printf("nr_sleep_events: %ld\n", nr_sleep_events);
	printf("nr_wakeup_events: %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printf("target-less wakeups: %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printf("run atoms optimized: %ld\n",
			nr_run_events_optimized);

	print_task_traces();
	add_cross_task_wakeups();

	create_tasks();
	printf("------------------------------------------------------------\n");
	for (i = 0; i < replay_repeat; i++)
		run_one_test();
}

static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread;

	thread = threads__findnew(event->comm.pid, &threads, &last_match);

	dump_printf("%p [%p]: perf_event_comm: %s:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->comm.comm, event->comm.pid);

	if (thread == NULL ||
	    thread__set_comm(thread, event->comm.comm)) {
		dump_printf("problem processing perf_event_comm, skipping event.\n");
		return -1;
	}
	total_comm++;

	return 0;
}

struct raw_event_sample {
	u32 size;
	char data[0];
};

#define FILL_FIELD(ptr, field, event, data) \
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data) \
do { \
	void *__array = raw_field_ptr(event, #array, data); \
	memcpy(ptr.array, __array, sizeof(ptr.array)); \
} while (0)

#define FILL_COMMON_FIELDS(ptr, event, data) \
do { \
	FILL_FIELD(ptr, common_type, event, data); \
	FILL_FIELD(ptr, common_flags, event, data); \
	FILL_FIELD(ptr, common_preempt_count, event, data); \
	FILL_FIELD(ptr, common_pid, event, data); \
	FILL_FIELD(ptr, common_tgid, event, data); \
} while (0)

struct trace_switch_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char prev_comm[16];
	u32 prev_pid;
	u32 prev_prio;
	u64 prev_state;
	char next_comm[16];
	u32 next_pid;
	u32 next_prio;
};

struct trace_runtime_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u64 runtime;
	u64 vruntime;
};

struct trace_wakeup_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;

	u32 prio;
	u32 success;
	u32 cpu;
};

struct trace_fork_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char parent_comm[16];
	u32 parent_pid;
	char child_comm[16];
	u32 child_pid;
};

struct trace_sched_handler {
	void (*switch_event)(struct trace_switch_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*runtime_event)(struct trace_runtime_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);

	void (*wakeup_event)(struct trace_wakeup_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*fork_event)(struct trace_fork_event *,
			   struct event *,
			   int cpu,
			   u64 timestamp,
			   struct thread *thread);
};

static void
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    struct event *event,
		    int cpu __used,
		    u64 timestamp __used,
		    struct thread *thread __used)
{
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", event);

		printf(" ... pid %d woke up %s/%d\n",
			wakeup_event->common_pid,
			wakeup_event->comm,
			wakeup_event->pid);
	}

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, timestamp, wakee);
}

static u64 cpu_last_switched[MAX_CPUS];

static void
replay_switch_event(struct trace_switch_event *switch_event,
		    struct event *event,
		    int cpu,
		    u64 timestamp,
		    struct thread *thread __used)
{
	struct task_desc *prev, *next;
	u64 timestamp0;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", event);

	if (cpu >= MAX_CPUS || cpu < 0)
		return;

	timestamp0 = cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	if (verbose) {
		printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
			switch_event->prev_comm, switch_event->prev_pid,
			switch_event->next_comm, switch_event->next_pid,
			delta);
	}

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
}

static void
replay_fork_event(struct trace_fork_event *fork_event,
		  struct event *event,
		  int cpu __used,
		  u64 timestamp __used,
		  struct thread *thread __used)
{
	if (verbose) {
		printf("sched_fork event %p\n", event);
		printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
		printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
	}
	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);
}

static struct trace_sched_handler replay_ops = {
	.wakeup_event = replay_wakeup_event,
	.switch_event = replay_switch_event,
	.fork_event = replay_fork_event,
};

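/*
 * The latency tool keeps per-thread work atoms in an rb-tree ordered by
 * a list of sort dimensions, so tree lookups and the final report can
 * share one multi-key comparator (thread_lat_cmp).
 */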
struct sort_dimension {
	const char *name;
	sort_fn_t cmp;
	struct list_head list;
};

static LIST_HEAD(cmp_pid);

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}

static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void thread_atoms_insert(struct thread *thread)
{
	struct work_atoms *atoms;

	atoms = calloc(sizeof(*atoms), 1);
	if (!atoms)
		die("No memory");

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&atom_root, atoms, &cmp_pid);
}

static void
latency_fork_event(struct trace_fork_event *fork_event __used,
		   struct event *event __used,
		   int cpu __used,
		   u64 timestamp __used,
		   struct thread *thread __used)
{
	/* should insert the newcomer */
}

__used
static char sched_out_state(struct trace_switch_event *switch_event)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[switch_event->prev_state];
}

static void
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom;

	atom = calloc(sizeof(*atom), 1);
	if (!atom)
		die("No memory");

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat)
		atoms->max_lat = delta;
	atoms->nb_atoms++;
}

1070static void
1071latency_switch_event(struct trace_switch_event *switch_event,
1072 struct event *event __used,
ea92ed5a 1073 int cpu,
cdce9d73
FW
1074 u64 timestamp,
1075 struct thread *thread __used)
1076{
39aeb52f 1077 struct work_atoms *out_events, *in_events;
cdce9d73 1078 struct thread *sched_out, *sched_in;
ea92ed5a
IM
1079 u64 timestamp0;
1080 s64 delta;
1081
39aeb52f 1082 BUG_ON(cpu >= MAX_CPUS || cpu < 0);
ea92ed5a
IM
1083
1084 timestamp0 = cpu_last_switched[cpu];
1085 cpu_last_switched[cpu] = timestamp;
1086 if (timestamp0)
1087 delta = timestamp - timestamp0;
1088 else
1089 delta = 0;
1090
1091 if (delta < 0)
1092 die("hm, delta: %Ld < 0 ?\n", delta);
1093
cdce9d73
FW
1094
1095 sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
1096 sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
1097
39aeb52f 1098 out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
1099 if (!out_events) {
1100 thread_atoms_insert(sched_out);
1101 out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
1102 if (!out_events)
1103 die("out-event: Internal tree error");
1104 }
1105 add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);
1106
1107 in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
1108 if (!in_events) {
b1ffe8f3 1109 thread_atoms_insert(sched_in);
39aeb52f 1110 in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
1111 if (!in_events)
1112 die("in-event: Internal tree error");
1113 /*
1114 * Take came in we have not heard about yet,
1115 * add in an initial atom in runnable state:
1116 */
1117 add_sched_out_event(in_events, 'R', timestamp);
cdce9d73 1118 }
39aeb52f 1119 add_sched_in_event(in_events, timestamp);
1120}
cdce9d73 1121
39aeb52f 1122static void
1123latency_runtime_event(struct trace_runtime_event *runtime_event,
1124 struct event *event __used,
1125 int cpu,
1126 u64 timestamp,
1127 struct thread *this_thread __used)
1128{
1129 struct work_atoms *atoms;
1130 struct thread *thread;
1131
1132 BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1133
1134 thread = threads__findnew(runtime_event->pid, &threads, &last_match);
1135 atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
1136 if (!atoms) {
1137 thread_atoms_insert(thread);
1138 atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
1139 if (!atoms)
1140 die("in-event: Internal tree error");
1141 add_sched_out_event(atoms, 'R', timestamp);
cdce9d73
FW
1142 }
1143
39aeb52f 1144 add_runtime_event(atoms, runtime_event->runtime, timestamp);
cdce9d73
FW
1145}
1146
1147static void
1148latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
39aeb52f 1149 struct event *__event __used,
cdce9d73
FW
1150 int cpu __used,
1151 u64 timestamp,
1152 struct thread *thread __used)
1153{
39aeb52f 1154 struct work_atoms *atoms;
b1ffe8f3 1155 struct work_atom *atom;
cdce9d73
FW
1156 struct thread *wakee;
1157
1158 /* Note for later, it may be interesting to observe the failing cases */
1159 if (!wakeup_event->success)
1160 return;
1161
1162 wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
b5fae128 1163 atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
17562205 1164 if (!atoms) {
b1ffe8f3 1165 thread_atoms_insert(wakee);
39aeb52f 1166 atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
1167 if (!atoms)
1168 die("wakeup-event: Internal tree error");
1169 add_sched_out_event(atoms, 'S', timestamp);
cdce9d73
FW
1170 }
1171
39aeb52f 1172 BUG_ON(list_empty(&atoms->work_list));
cdce9d73 1173
39aeb52f 1174 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
cdce9d73 1175
dc02bf71
IM
1176 if (atom->state != THREAD_SLEEPING)
1177 nr_state_machine_bugs++;
cdce9d73 1178
ea57c4f5
IM
1179 nr_timestamps++;
1180 if (atom->sched_out_time > timestamp) {
dc02bf71 1181 nr_unordered_timestamps++;
aa1ab9d2 1182 return;
ea57c4f5 1183 }
aa1ab9d2 1184
b1ffe8f3
IM
1185 atom->state = THREAD_WAIT_CPU;
1186 atom->wake_up_time = timestamp;
cdce9d73
FW
1187}
1188
1189static struct trace_sched_handler lat_ops = {
ea92ed5a
IM
1190 .wakeup_event = latency_wakeup_event,
1191 .switch_event = latency_switch_event,
39aeb52f 1192 .runtime_event = latency_runtime_event,
ea92ed5a 1193 .fork_event = latency_fork_event,
cdce9d73
FW
1194};
1195
39aeb52f 1196static void output_lat_thread(struct work_atoms *work_list)
cdce9d73 1197{
cdce9d73
FW
1198 int i;
1199 int ret;
66685678 1200 u64 avg;
cdce9d73 1201
39aeb52f 1202 if (!work_list->nb_atoms)
cdce9d73 1203 return;
ea57c4f5
IM
1204 /*
1205 * Ignore idle threads:
1206 */
39aeb52f 1207 if (!work_list->thread->pid)
ea57c4f5 1208 return;
cdce9d73 1209
39aeb52f 1210 all_runtime += work_list->total_runtime;
1211 all_count += work_list->nb_atoms;
66685678 1212
39aeb52f 1213 ret = printf(" %s-%d ", work_list->thread->comm, work_list->thread->pid);
cdce9d73 1214
08f69e6c 1215 for (i = 0; i < 24 - ret; i++)
cdce9d73
FW
1216 printf(" ");
1217
39aeb52f 1218 avg = work_list->total_lat / work_list->nb_atoms;
cdce9d73 1219
dc02bf71 1220 printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms |\n",
39aeb52f 1221 (double)work_list->total_runtime / 1e6,
1222 work_list->nb_atoms, (double)avg / 1e6,
1223 (double)work_list->max_lat / 1e6);
cdce9d73
FW
1224}
1225
39aeb52f 1226static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
daa1d7a5 1227{
daa1d7a5
FW
1228 if (l->thread->pid < r->thread->pid)
1229 return -1;
1230 if (l->thread->pid > r->thread->pid)
1231 return 1;
1232
1233 return 0;
1234}
1235
1236static struct sort_dimension pid_sort_dimension = {
b5fae128
IM
1237 .name = "pid",
1238 .cmp = pid_cmp,
daa1d7a5
FW
1239};
1240
39aeb52f 1241static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
daa1d7a5
FW
1242{
1243 u64 avgl, avgr;
1244
1245 if (!l->nb_atoms)
1246 return -1;
1247
1248 if (!r->nb_atoms)
1249 return 1;
1250
1251 avgl = l->total_lat / l->nb_atoms;
1252 avgr = r->total_lat / r->nb_atoms;
1253
1254 if (avgl < avgr)
1255 return -1;
1256 if (avgl > avgr)
1257 return 1;
1258
1259 return 0;
1260}
1261
1262static struct sort_dimension avg_sort_dimension = {
b5fae128
IM
1263 .name = "avg",
1264 .cmp = avg_cmp,
daa1d7a5
FW
1265};
1266
39aeb52f 1267static int max_cmp(struct work_atoms *l, struct work_atoms *r)
daa1d7a5
FW
1268{
1269 if (l->max_lat < r->max_lat)
1270 return -1;
1271 if (l->max_lat > r->max_lat)
1272 return 1;
1273
1274 return 0;
1275}
1276
1277static struct sort_dimension max_sort_dimension = {
b5fae128
IM
1278 .name = "max",
1279 .cmp = max_cmp,
daa1d7a5
FW
1280};
1281
39aeb52f 1282static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
daa1d7a5
FW
1283{
1284 if (l->nb_atoms < r->nb_atoms)
1285 return -1;
1286 if (l->nb_atoms > r->nb_atoms)
1287 return 1;
1288
1289 return 0;
1290}
1291
1292static struct sort_dimension switch_sort_dimension = {
b5fae128
IM
1293 .name = "switch",
1294 .cmp = switch_cmp,
daa1d7a5
FW
1295};
1296
39aeb52f 1297static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
daa1d7a5
FW
1298{
1299 if (l->total_runtime < r->total_runtime)
1300 return -1;
1301 if (l->total_runtime > r->total_runtime)
1302 return 1;
1303
1304 return 0;
1305}
1306
1307static struct sort_dimension runtime_sort_dimension = {
b5fae128
IM
1308 .name = "runtime",
1309 .cmp = runtime_cmp,
daa1d7a5
FW
1310};
1311
1312static struct sort_dimension *available_sorts[] = {
1313 &pid_sort_dimension,
1314 &avg_sort_dimension,
1315 &max_sort_dimension,
1316 &switch_sort_dimension,
1317 &runtime_sort_dimension,
1318};
1319
1320#define NB_AVAILABLE_SORTS (int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))
1321
1322static LIST_HEAD(sort_list);
1323
1324static int sort_dimension__add(char *tok, struct list_head *list)
1325{
1326 int i;
1327
1328 for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
1329 if (!strcmp(available_sorts[i]->name, tok)) {
1330 list_add_tail(&available_sorts[i]->list, list);
1331
1332 return 0;
1333 }
1334 }
1335
1336 return -1;
1337}
1338
1339static void setup_sorting(void);
1340
1341static void sort_lat(void)
1342{
1343 struct rb_node *node;
1344
1345 for (;;) {
39aeb52f 1346 struct work_atoms *data;
b1ffe8f3 1347 node = rb_first(&atom_root);
daa1d7a5
FW
1348 if (!node)
1349 break;
1350
b1ffe8f3 1351 rb_erase(node, &atom_root);
39aeb52f 1352 data = rb_entry(node, struct work_atoms, node);
b1ffe8f3 1353 __thread_latency_insert(&sorted_atom_root, data, &sort_list);
daa1d7a5
FW
1354 }
1355}
1356
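/*
 * 'perf sched latency': read the trace, re-sort the per-thread atoms by
 * the user's sort keys, then print the latency table followed by the
 * sanity-check statistics.
 */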
static void __cmd_lat(void)
{
	struct rb_node *next;

	setup_pager();
	read_events();
	sort_lat();

	printf("\n -----------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms |\n");
	printf(" -----------------------------------------------------------------------------------------\n");

	next = rb_first(&sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(work_list);
		next = rb_next(next);
	}

	printf(" -----------------------------------------------------------------------------------------\n");
	printf(" TOTAL:                |%11.3f ms |%9Ld |\n",
		(double)all_runtime/1e6, all_count);

	printf(" ---------------------------------------------------\n");
	if (nr_unordered_timestamps && nr_timestamps) {
		printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
			nr_unordered_timestamps, nr_timestamps);
	}
	if (nr_lost_events && nr_events) {
		printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)nr_lost_events/(double)nr_events*100.0,
			nr_lost_events, nr_events, nr_lost_chunks);
	}
	if (nr_state_machine_bugs && nr_timestamps) {
		printf(" INFO: %.3f%% state machine bugs (%ld out of %ld)",
			(double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
			nr_state_machine_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
	if (nr_context_switch_bugs && nr_timestamps) {
		printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
			nr_context_switch_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
	printf("\n");
}

static struct trace_sched_handler *trace_handler;

static void
process_sched_wakeup_event(struct raw_event_sample *raw,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_wakeup_event wakeup_event;

	FILL_COMMON_FIELDS(wakeup_event, event, raw->data);

	FILL_ARRAY(wakeup_event, comm, event, raw->data);
	FILL_FIELD(wakeup_event, pid, event, raw->data);
	FILL_FIELD(wakeup_event, prio, event, raw->data);
	FILL_FIELD(wakeup_event, success, event, raw->data);
	FILL_FIELD(wakeup_event, cpu, event, raw->data);

	trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
}

/*
 * Track the current task - that way we can know whether there's any
 * weird events, such as a task being switched away that is not current.
 */
static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };

static void
process_sched_switch_event(struct raw_event_sample *raw,
			   struct event *event,
			   int cpu,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_switch_event switch_event;

	FILL_COMMON_FIELDS(switch_event, event, raw->data);

	FILL_ARRAY(switch_event, prev_comm, event, raw->data);
	FILL_FIELD(switch_event, prev_pid, event, raw->data);
	FILL_FIELD(switch_event, prev_prio, event, raw->data);
	FILL_FIELD(switch_event, prev_state, event, raw->data);
	FILL_ARRAY(switch_event, next_comm, event, raw->data);
	FILL_FIELD(switch_event, next_pid, event, raw->data);
	FILL_FIELD(switch_event, next_prio, event, raw->data);

	if (curr_pid[cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (curr_pid[cpu] != switch_event.prev_pid)
			nr_context_switch_bugs++;
	}
	curr_pid[cpu] = switch_event.next_pid;

	trace_handler->switch_event(&switch_event, event, cpu, timestamp, thread);
}

static void
process_sched_runtime_event(struct raw_event_sample *raw,
			    struct event *event,
			    int cpu __used,
			    u64 timestamp __used,
			    struct thread *thread __used)
{
	struct trace_runtime_event runtime_event;

	FILL_ARRAY(runtime_event, comm, event, raw->data);
	FILL_FIELD(runtime_event, pid, event, raw->data);
	FILL_FIELD(runtime_event, runtime, event, raw->data);
	FILL_FIELD(runtime_event, vruntime, event, raw->data);

	trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);
}

static void
process_sched_fork_event(struct raw_event_sample *raw,
			 struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	struct trace_fork_event fork_event;

	FILL_COMMON_FIELDS(fork_event, event, raw->data);

	FILL_ARRAY(fork_event, parent_comm, event, raw->data);
	FILL_FIELD(fork_event, parent_pid, event, raw->data);
	FILL_ARRAY(fork_event, child_comm, event, raw->data);
	FILL_FIELD(fork_event, child_pid, event, raw->data);

	trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
}

static void
process_sched_exit_event(struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	if (verbose)
		printf("sched_exit event %p\n", event);
}

static void
process_raw_event(event_t *raw_event __used, void *more_data,
		  int cpu, u64 timestamp, struct thread *thread)
{
	struct raw_event_sample *raw = more_data;
	struct event *event;
	int type;

	type = trace_parse_common_type(raw->data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "sched_switch"))
		process_sched_switch_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_stat_runtime"))
		process_sched_runtime_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup_new"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_fork"))
		process_sched_fork_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_exit"))
		process_sched_exit_event(event, cpu, timestamp, thread);
}

static int
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
	char level;
	int show = 0;
	struct dso *dso = NULL;
	struct thread *thread;
	u64 ip = event->ip.ip;
	u64 timestamp = -1;
	u32 cpu = -1;
	u64 period = 1;
	void *more_data = event->ip.__more_data;
	int cpumode;

	thread = threads__findnew(event->ip.pid, &threads, &last_match);

	if (sample_type & PERF_SAMPLE_TIME) {
		timestamp = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		cpu = *(u32 *)more_data;
		more_data += sizeof(u32);
		more_data += sizeof(u32); /* reserved */
	}

	if (sample_type & PERF_SAMPLE_PERIOD) {
		period = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->header.misc,
		event->ip.pid, event->ip.tid,
		(void *)(long)ip,
		(long long)period);

	if (thread == NULL) {
		eprintf("problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;

	if (cpumode == PERF_EVENT_MISC_KERNEL) {
		show = SHOW_KERNEL;
		level = 'k';

		dso = kernel_dso;

		dump_printf(" ...... dso: %s\n", dso->name);

	} else if (cpumode == PERF_EVENT_MISC_USER) {

		show = SHOW_USER;
		level = '.';

	} else {
		show = SHOW_HV;
		level = 'H';

		dso = hypervisor_dso;

		dump_printf(" ...... dso: [hypervisor]\n");
	}

	if (sample_type & PERF_SAMPLE_RAW)
		process_raw_event(event, more_data, cpu, timestamp, thread);

	return 0;
}

static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
	trace_event(event);

	nr_events++;
	switch (event->header.type) {
	case PERF_EVENT_MMAP:
		return 0;
	case PERF_EVENT_LOST:
		nr_lost_chunks++;
		nr_lost_events += event->lost.lost;
		return 0;

	case PERF_EVENT_COMM:
		return process_comm_event(event, offset, head);

	case PERF_EVENT_EXIT ... PERF_EVENT_READ:
		return 0;

	case PERF_EVENT_SAMPLE:
		return process_sample_event(event, offset, head);

	case PERF_EVENT_MAX:
	default:
		return -1;
	}

	return 0;
}

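/*
 * Stream perf.data through a sliding mmap window, dispatching each
 * event to process_event(); on a bogus size, re-align to 8 bytes and
 * step forward in the hope of catching the stream again.
 */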
static int read_events(void)
{
	int ret, rc = EXIT_FAILURE;
	unsigned long offset = 0;
	unsigned long head = 0;
	struct stat perf_stat;
	event_t *event;
	uint32_t size;
	char *buf;

	trace_report();
	register_idle_thread(&threads, &last_match);

	input = open(input_name, O_RDONLY);
	if (input < 0) {
		perror("failed to open file");
		exit(-1);
	}

	ret = fstat(input, &perf_stat);
	if (ret < 0) {
		perror("failed to stat file");
		exit(-1);
	}

	if (!perf_stat.st_size) {
		fprintf(stderr, "zero-sized file, nothing to do!\n");
		exit(0);
	}
	header = perf_header__read(input);
	head = header->data_offset;
	sample_type = perf_header__sample_type(header);

	if (!(sample_type & PERF_SAMPLE_RAW))
		die("No trace sample to read. Did you call perf record "
		    "without -R?");

	if (load_kernel() < 0) {
		perror("failed to load kernel symbols");
		return EXIT_FAILURE;
	}

remap:
	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
			   MAP_SHARED, input, offset);
	if (buf == MAP_FAILED) {
		perror("failed to mmap file");
		exit(-1);
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (!size)
		size = 8;

	if (head + event->header.size >= page_size * mmap_window) {
		unsigned long shift = page_size * (head / page_size);
		int res;

		res = munmap(buf, page_size * mmap_window);
		assert(res == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	if (!size || process_event(event, offset, head) < 0) {
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head < (unsigned long)perf_stat.st_size)
		goto more;

	rc = EXIT_SUCCESS;
	close(input);

	return rc;
}

static const char * const sched_usage[] = {
	"perf sched [<options>] {record|latency|replay|trace}",
	NULL
};

static const struct option sched_options[] = {
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const latency_usage[] = {
	"perf sched latency [<options>]",
	NULL
};

static const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const replay_usage[] = {
	"perf sched replay [<options>]",
	NULL
};

static const struct option replay_options[] = {
	OPT_INTEGER('r', "repeat", &replay_repeat,
		    "repeat the workload replay N times (-1: infinite)"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(latency_usage, latency_options);
		}
	}

	free(str);

	sort_dimension__add((char *)"pid", &cmp_pid);
}

static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-M",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "sched:sched_switch:r",
	"-e", "sched:sched_stat_wait:r",
	"-e", "sched:sched_stat_sleep:r",
	"-e", "sched:sched_stat_iowait:r",
	"-e", "sched:sched_stat_runtime:r",
	"-e", "sched:sched_process_exit:r",
	"-e", "sched:sched_process_fork:r",
	"-e", "sched:sched_wakeup:r",
	"-e", "sched:sched_migrate_task:r",
};

static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

int cmd_sched(int argc, const char **argv, const char *prefix __used)
{
	symbol__init();
	page_size = getpagesize();

	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		trace_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting();
		__cmd_lat();
	} else if (!strncmp(argv[0], "rep", 3)) {
		trace_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		__cmd_replay();
	} else if (!strcmp(argv[0], "trace")) {
		/*
		 * Aliased to 'perf trace' for now:
		 */
		return cmd_trace(argc, argv, prefix);
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}