/* tools/perf/builtin-sched.c */
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <sys/types.h>
#include <sys/prctl.h>

#include <semaphore.h>
#include <pthread.h>
#include <math.h>

static char const		*input_name = "perf.data";
static int			input;
static unsigned long		page_size;
static unsigned long		mmap_window = 32;

static unsigned long		total_comm = 0;

static struct rb_root		threads;
static struct thread		*last_match;

static struct perf_header	*header;
static u64			sample_type;

static char			default_sort_order[] = "avg, max, switch, runtime";
static char			*sort_order = default_sort_order;

#define PR_SET_NAME		15               /* Set process name */
#define MAX_CPUS		4096

#define BUG_ON(x)		assert(!(x))

static u64			run_measurement_overhead;
static u64			sleep_measurement_overhead;

#define COMM_LEN		20
#define SYM_LEN			129

#define MAX_PID			65536

static unsigned long		nr_tasks;

struct sched_atom;

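/*
 * Replay data model: every task seen in the trace becomes a task_desc
 * holding an ordered array of sched_atoms (run/sleep/wakeup events)
 * that a dedicated pthread later acts out, paced by semaphores.
 */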
struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

	pthread_t		thread;
	sem_t			sleep_sem;

	sem_t			ready_for_work;
	sem_t			work_done_sem;

	u64			cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
};

struct sched_atom {
	enum sched_event_type	type;
	u64			timestamp;
	u64			duration;
	unsigned long		nr;
	int			specific_wait;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};

static struct task_desc		*pid_to_task[MAX_PID];

static struct task_desc		**tasks;

static pthread_mutex_t		start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static u64			start_time;

static pthread_mutex_t		work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long		nr_run_events;
static unsigned long		nr_sleep_events;
static unsigned long		nr_wakeup_events;

static unsigned long		nr_sleep_corrections;
static unsigned long		nr_run_events_optimized;

static unsigned long		targetless_wakeups;
static unsigned long		multitarget_wakeups;

static u64			cpu_usage;
static u64			runavg_cpu_usage;
static u64			parent_cpu_usage;
static u64			runavg_parent_cpu_usage;

static unsigned long		nr_runs;
static u64			sum_runtime;
static u64			sum_fluct;
static u64			run_avg;

static unsigned long		replay_repeat = 10;
static unsigned long		nr_timestamps;
static unsigned long		nr_unordered_timestamps;
static unsigned long		nr_state_machine_bugs;
static unsigned long		nr_context_switch_bugs;
static unsigned long		nr_events;
static unsigned long		nr_lost_chunks;
static unsigned long		nr_lost_events;

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

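/*
 * Latency accounting model: a work_atom covers one scheduling interval
 * of a thread (sched-out -> wakeup -> sched-in), and a work_atoms node
 * collects all atoms of one thread plus its aggregate latency stats.
 */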
struct work_atom {
	struct list_head	list;
	enum thread_state	state;
	u64			sched_out_time;
	u64			wake_up_time;
	u64			sched_in_time;
	u64			runtime;
};

struct work_atoms {
	struct list_head	work_list;
	struct thread		*thread;
	struct rb_node		node;
	u64			max_lat;
	u64			total_lat;
	u64			nb_atoms;
	u64			total_runtime;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

static struct rb_root		atom_root, sorted_atom_root;

static u64			all_runtime;
static u64			all_count;

static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void burn_nsecs(u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 1000000000ULL;
	ts.tv_sec  = nsecs / 1000000000ULL;

	nanosleep(&ts, NULL);
}

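/*
 * Calibrate the cost of measuring itself: both loops below keep the
 * minimum of ten trials, so scheduling noise can only inflate single
 * samples, never the recorded overhead.
 */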
static void calibrate_run_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	run_measurement_overhead = min_delta;

	printf("run measurement overhead: %Ld nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
}

static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = calloc(1, sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

static void
add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * into it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	nr_run_events++;
}

static void
add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
		       struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = calloc(1, sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	nr_wakeup_events++;
}

static void
add_sched_event_sleep(struct task_desc *task, u64 timestamp,
		      u64 task_state __used)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	nr_sleep_events++;
}

static struct task_desc *register_pid(unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];

	if (task)
		return task;

	task = calloc(1, sizeof(*task));
	task->pid = pid;
	task->nr = nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;
	nr_tasks++;
	tasks = realloc(tasks, nr_tasks*sizeof(struct task_desc *));
	BUG_ON(!tasks);
	tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

	return task;
}


static void print_task_traces(void)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(void)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < nr_tasks; i++) {
		task1 = tasks[i];
		j = i + 1;
		if (j == nr_tasks)
			j = 0;
		task2 = tasks[j];
		add_sched_event_wakeup(task1, 0, task2);
	}
}

static void
process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
{
	int ret = 0;
	u64 now;
	long long delta;

	now = get_nsecs();
	delta = start_time + atom->timestamp - now;

	switch (atom->type) {
		case SCHED_EVENT_RUN:
			burn_nsecs(atom->duration);
			break;
		case SCHED_EVENT_SLEEP:
			if (atom->wait_sem)
				ret = sem_wait(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_WAKEUP:
			if (atom->wait_sem)
				ret = sem_post(atom->wait_sem);
			BUG_ON(ret);
			break;
		default:
			BUG_ON(1);
	}
}

static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum =  ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}

static u64 get_cpu_usage_nsec_self(void)
{
	char filename [] = "/proc/1234567890/sched";
	unsigned long msecs, nsecs;
	char *line = NULL;
	u64 total = 0;
	size_t len = 0;
	ssize_t chars;
	FILE *file;
	int ret;

	sprintf(filename, "/proc/%d/sched", getpid());
	file = fopen(filename, "r");
	BUG_ON(!file);

	while ((chars = getline(&line, &len, file)) != -1) {
		ret = sscanf(line, "se.sum_exec_runtime : %ld.%06ld\n",
			     &msecs, &nsecs);
		if (ret == 2) {
			total = msecs*1e6 + nsecs;
			break;
		}
	}
	if (line)
		free(line);
	fclose(file);

	return total;
}

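/*
 * Worker handshake: each task thread posts ready_for_work, then uses
 * start_work_mutex (held locked by the parent) as a start barrier,
 * replays its atoms, posts work_done_sem, and finally blocks on
 * work_done_wait_mutex before looping for the next iteration.
 */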
static void *thread_func(void *ctx)
{
	struct task_desc *this_task = ctx;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);

again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self();

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self();
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;

	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}

static void create_tasks(void)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
	BUG_ON(err);
	err = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);
		BUG_ON(err);
	}
}

static void wait_for_tasks(void)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();
	cpu_usage = 0;
	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
				   parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}

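/*
 * Per-iteration statistics: sum_runtime/nr_runs is the mean runtime,
 * sum_fluct accumulates |delta - avg|, and run_avg is a 9:1
 * exponentially weighted moving average of the iteration runtimes.
 */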
static void run_one_test(void)
{
	u64 T0, T1, delta, avg_delta, fluct, std_dev;

	T0 = get_nsecs();
	wait_for_tasks();
	T1 = get_nsecs();

	delta = T1 - T0;
	sum_runtime += delta;
	nr_runs++;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sum_fluct += fluct;
	std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
	if (!run_avg)
		run_avg = delta;
	run_avg = (run_avg*9 + delta)/10;

	printf("#%-3ld: %0.3f, ",
		nr_runs, (double)delta/1000000.0);

	printf("ravg: %0.2f, ",
		(double)run_avg/1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)parent_cpu_usage/1e6,
		(double)runavg_parent_cpu_usage/1e6);
#endif

	printf("\n");

	if (nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;
}

static void test_calibrations(void)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(1e6);
	T1 = get_nsecs();

	printf("the run test took %Ld nsecs\n", T1-T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();

	printf("the sleep test took %Ld nsecs\n", T1-T0);
}

static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread;

	thread = threads__findnew(event->comm.pid, &threads, &last_match);

	dump_printf("%p [%p]: perf_event_comm: %s:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->comm.comm, event->comm.pid);

	if (thread == NULL ||
	    thread__set_comm(thread, event->comm.comm)) {
		dump_printf("problem processing perf_event_comm, skipping event.\n");
		return -1;
	}
	total_comm++;

	return 0;
}

struct raw_event_sample {
	u32 size;
	char data[0];
};

#define FILL_FIELD(ptr, field, event, data)	\
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data)			\
do {								\
	void *__array = raw_field_ptr(event, #array, data);	\
	memcpy(ptr.array, __array, sizeof(ptr.array));		\
} while(0)

#define FILL_COMMON_FIELDS(ptr, event, data)			\
do {								\
	FILL_FIELD(ptr, common_type, event, data);		\
	FILL_FIELD(ptr, common_flags, event, data);		\
	FILL_FIELD(ptr, common_preempt_count, event, data);	\
	FILL_FIELD(ptr, common_pid, event, data);		\
	FILL_FIELD(ptr, common_tgid, event, data);		\
} while (0)

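/*
 * The FILL_* macros above decode named tracepoint fields out of the
 * raw sample payload via the trace-event parser; the structs below
 * mirror the field layout of each sched tracepoint we consume.
 */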
struct trace_switch_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char prev_comm[16];
	u32 prev_pid;
	u32 prev_prio;
	u64 prev_state;
	char next_comm[16];
	u32 next_pid;
	u32 next_prio;
};

struct trace_runtime_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u64 runtime;
	u64 vruntime;
};

struct trace_wakeup_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;

	u32 prio;
	u32 success;
	u32 cpu;
};

struct trace_fork_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char parent_comm[16];
	u32 parent_pid;
	char child_comm[16];
	u32 child_pid;
};

struct trace_sched_handler {
	void (*switch_event)(struct trace_switch_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*runtime_event)(struct trace_runtime_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);

	void (*wakeup_event)(struct trace_wakeup_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*fork_event)(struct trace_fork_event *,
			   struct event *,
			   int cpu,
			   u64 timestamp,
			   struct thread *thread);
};

static void
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    struct event *event,
		    int cpu __used,
		    u64 timestamp __used,
		    struct thread *thread __used)
{
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", event);

		printf(" ... pid %d woke up %s/%d\n",
			wakeup_event->common_pid,
			wakeup_event->comm,
			wakeup_event->pid);
	}

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, timestamp, wakee);
}

static u64 cpu_last_switched[MAX_CPUS];

static void
replay_switch_event(struct trace_switch_event *switch_event,
		    struct event *event,
		    int cpu,
		    u64 timestamp,
		    struct thread *thread __used)
{
	struct task_desc *prev, *next;
	u64 timestamp0;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", event);

	if (cpu >= MAX_CPUS || cpu < 0)
		return;

	timestamp0 = cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	if (verbose) {
		printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
			switch_event->prev_comm, switch_event->prev_pid,
			switch_event->next_comm, switch_event->next_pid,
			delta);
	}

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
}

static void
replay_fork_event(struct trace_fork_event *fork_event,
		  struct event *event,
		  int cpu __used,
		  u64 timestamp __used,
		  struct thread *thread __used)
{
	if (verbose) {
		printf("sched_fork event %p\n", event);
		printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
		printf("...  child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
	}
	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);
}

static struct trace_sched_handler replay_ops  = {
	.wakeup_event		= replay_wakeup_event,
	.switch_event		= replay_switch_event,
	.fork_event		= replay_fork_event,
};

struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(cmp_pid);

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

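/*
 * work_atoms nodes live in an rbtree ordered by a comparator chain;
 * lookups build a stack-local key with only the thread pointer set,
 * which is all that the pid comparator in cmp_pid needs.
 */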
static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}

static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void thread_atoms_insert(struct thread *thread)
{
	struct work_atoms *atoms;

	atoms = calloc(sizeof(*atoms), 1);
	if (!atoms)
		die("No memory");

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&atom_root, atoms, &cmp_pid);
}

static void
latency_fork_event(struct trace_fork_event *fork_event __used,
		   struct event *event __used,
		   int cpu __used,
		   u64 timestamp __used,
		   struct thread *thread __used)
{
	/* should insert the newcomer */
}

__used
static char sched_out_state(struct trace_switch_event *switch_event)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[switch_event->prev_state];
}

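/*
 * Latency state machine, one work_atom per scheduling interval:
 *
 *   sched-out           -> THREAD_SLEEPING (calloc default)
 *   sched-out while 'R' -> THREAD_WAIT_CPU, wakeup time = sched-out time
 *   wakeup              -> THREAD_WAIT_CPU, wakeup time recorded
 *   sched-in            -> THREAD_SCHED_IN, latency = sched-in - wakeup
 *
 * Atoms that appear to be scheduled in before their recorded wakeup
 * are marked THREAD_IGNORE and excluded from the statistics.
 */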
static void
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom;

	atom = calloc(sizeof(*atom), 1);
	if (!atom)
		die("No memory");

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat)
		atoms->max_lat = delta;
	atoms->nb_atoms++;
}

static void
latency_switch_event(struct trace_switch_event *switch_event,
		     struct event *event __used,
		     int cpu,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = cpu_last_switched[cpu];
	cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);


	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);

	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
	if (!out_events) {
		thread_atoms_insert(sched_out);
		out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
		if (!out_events)
			die("out-event: Internal tree error");
	}
	add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);

	in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
	if (!in_events) {
		thread_atoms_insert(sched_in);
		in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
		if (!in_events)
			die("in-event: Internal tree error");
		/*
		 * Task came in we have not heard about yet,
		 * add in an initial atom in runnable state:
		 */
		add_sched_out_event(in_events, 'R', timestamp);
	}
	add_sched_in_event(in_events, timestamp);
}

static void
latency_runtime_event(struct trace_runtime_event *runtime_event,
		      struct event *event __used,
		      int cpu,
		      u64 timestamp,
		      struct thread *this_thread __used)
{
	struct work_atoms *atoms;
	struct thread *thread;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	thread = threads__findnew(runtime_event->pid, &threads, &last_match);
	atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(thread);
		atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
		if (!atoms)
			die("in-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);
	}

	add_runtime_event(atoms, runtime_event->runtime, timestamp);
}

static void
latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
		     struct event *__event __used,
		     int cpu __used,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;

	/* Note for later, it may be interesting to observe the failing cases */
	if (!wakeup_event->success)
		return;

	wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(wakee);
		atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
		if (!atoms)
			die("wakeup-event: Internal tree error");
		add_sched_out_event(atoms, 'S', timestamp);
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_SLEEPING)
		nr_state_machine_bugs++;

	nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		nr_unordered_timestamps++;
		return;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
}

static struct trace_sched_handler lat_ops  = {
	.wakeup_event		= latency_wakeup_event,
	.switch_event		= latency_switch_event,
	.runtime_event		= latency_runtime_event,
	.fork_event		= latency_fork_event,
};

static void output_lat_thread(struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(work_list->thread->comm, "swapper"))
		return;

	all_runtime += work_list->total_runtime;
	all_count += work_list->nb_atoms;

	ret = printf("  %s:%d ", work_list->thread->comm, work_list->thread->pid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;

	printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms |\n",
	      (double)work_list->total_runtime / 1e6,
		 work_list->nb_atoms, (double)avg / 1e6,
		 (double)work_list->max_lat / 1e6);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread->pid < r->thread->pid)
		return -1;
	if (l->thread->pid > r->thread->pid)
		return 1;

	return 0;
}

static struct sort_dimension pid_sort_dimension = {
	.name			= "pid",
	.cmp			= pid_cmp,
};

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static struct sort_dimension avg_sort_dimension = {
	.name			= "avg",
	.cmp			= avg_cmp,
};

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static struct sort_dimension max_sort_dimension = {
	.name			= "max",
	.cmp			= max_cmp,
};

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static struct sort_dimension switch_sort_dimension = {
	.name			= "switch",
	.cmp			= switch_cmp,
};

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

static struct sort_dimension runtime_sort_dimension = {
	.name			= "runtime",
	.cmp			= runtime_cmp,
};

static struct sort_dimension *available_sorts[] = {
	&pid_sort_dimension,
	&avg_sort_dimension,
	&max_sort_dimension,
	&switch_sort_dimension,
	&runtime_sort_dimension,
};

#define NB_AVAILABLE_SORTS	(int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))

static LIST_HEAD(sort_list);

static int sort_dimension__add(char *tok, struct list_head *list)
{
	int i;

	for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}

static void setup_sorting(void);

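/*
 * Re-sort: drain the pid-ordered atom_root tree and re-insert every
 * node into sorted_atom_root under the user-selected sort key chain.
 */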
static void sort_lat(void)
{
	struct rb_node *node;

	for (;;) {
		struct work_atoms *data;
		node = rb_first(&atom_root);
		if (!node)
			break;

		rb_erase(node, &atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sorted_atom_root, data, &sort_list);
	}
}

static struct trace_sched_handler *trace_handler;

static void
process_sched_wakeup_event(struct raw_event_sample *raw,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_wakeup_event wakeup_event;

	FILL_COMMON_FIELDS(wakeup_event, event, raw->data);

	FILL_ARRAY(wakeup_event, comm, event, raw->data);
	FILL_FIELD(wakeup_event, pid, event, raw->data);
	FILL_FIELD(wakeup_event, prio, event, raw->data);
	FILL_FIELD(wakeup_event, success, event, raw->data);
	FILL_FIELD(wakeup_event, cpu, event, raw->data);

	if (trace_handler->wakeup_event)
		trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
}

/*
 * Track the current task - that way we can know whether there's any
 * weird events, such as a task being switched away that is not current.
 */
static int max_cpu;

static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };

static struct thread *curr_thread[MAX_CPUS];

static char next_shortname1 = 'A';
static char next_shortname2 = '0';

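/*
 * 'perf sched map' output: one column per CPU, a two-character
 * shortname per task ('A0' .. 'Z9'), a '*' marking the CPU that just
 * switched, and 'shortname => comm:pid' the first time a task appears.
 */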
static void
map_switch_event(struct trace_switch_event *switch_event,
		 struct event *event __used,
		 int this_cpu,
		 u64 timestamp,
		 struct thread *thread __used)
{
	struct thread *sched_out, *sched_in;
	int new_shortname;
	u64 timestamp0;
	s64 delta;
	int cpu;

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > max_cpu)
		max_cpu = this_cpu;

	timestamp0 = cpu_last_switched[this_cpu];
	cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);


	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);

	curr_thread[this_cpu] = sched_in;

	printf("  ");

	new_shortname = 0;
	if (!sched_in->shortname[0]) {
		sched_in->shortname[0] = next_shortname1;
		sched_in->shortname[1] = next_shortname2;

		if (next_shortname1 < 'Z') {
			next_shortname1++;
		} else {
			next_shortname1='A';
			if (next_shortname2 < '9') {
				next_shortname2++;
			} else {
				next_shortname2='0';
			}
		}
		new_shortname = 1;
	}

	for (cpu = 0; cpu <= max_cpu; cpu++) {
		if (cpu != this_cpu)
			printf(" ");
		else
			printf("*");

		if (curr_thread[cpu]) {
			if (curr_thread[cpu]->pid)
				printf("%2s ", curr_thread[cpu]->shortname);
			else
				printf(".  ");
		} else
			printf("   ");
	}

	printf("  %12.6f secs ", (double)timestamp/1e9);
	if (new_shortname) {
		printf("%s => %s:%d\n",
			sched_in->shortname, sched_in->comm, sched_in->pid);
	} else {
		printf("\n");
	}
}


static void
process_sched_switch_event(struct raw_event_sample *raw,
			   struct event *event,
			   int this_cpu,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_switch_event switch_event;

	FILL_COMMON_FIELDS(switch_event, event, raw->data);

	FILL_ARRAY(switch_event, prev_comm, event, raw->data);
	FILL_FIELD(switch_event, prev_pid, event, raw->data);
	FILL_FIELD(switch_event, prev_prio, event, raw->data);
	FILL_FIELD(switch_event, prev_state, event, raw->data);
	FILL_ARRAY(switch_event, next_comm, event, raw->data);
	FILL_FIELD(switch_event, next_pid, event, raw->data);
	FILL_FIELD(switch_event, next_prio, event, raw->data);

	if (curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (curr_pid[this_cpu] != switch_event.prev_pid)
			nr_context_switch_bugs++;
	}
	if (trace_handler->switch_event)
		trace_handler->switch_event(&switch_event, event, this_cpu, timestamp, thread);

	curr_pid[this_cpu] = switch_event.next_pid;
}

static void
process_sched_runtime_event(struct raw_event_sample *raw,
			    struct event *event,
			    int cpu __used,
			    u64 timestamp __used,
			    struct thread *thread __used)
{
	struct trace_runtime_event runtime_event;

	FILL_ARRAY(runtime_event, comm, event, raw->data);
	FILL_FIELD(runtime_event, pid, event, raw->data);
	FILL_FIELD(runtime_event, runtime, event, raw->data);
	FILL_FIELD(runtime_event, vruntime, event, raw->data);

	if (trace_handler->runtime_event)
		trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);
}

static void
process_sched_fork_event(struct raw_event_sample *raw,
			 struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	struct trace_fork_event fork_event;

	FILL_COMMON_FIELDS(fork_event, event, raw->data);

	FILL_ARRAY(fork_event, parent_comm, event, raw->data);
	FILL_FIELD(fork_event, parent_pid, event, raw->data);
	FILL_ARRAY(fork_event, child_comm, event, raw->data);
	FILL_FIELD(fork_event, child_pid, event, raw->data);

	if (trace_handler->fork_event)
		trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
}

static void
process_sched_exit_event(struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	if (verbose)
		printf("sched_exit event %p\n", event);
}

static void
process_raw_event(event_t *raw_event __used, void *more_data,
		  int cpu, u64 timestamp, struct thread *thread)
{
	struct raw_event_sample *raw = more_data;
	struct event *event;
	int type;

	type = trace_parse_common_type(raw->data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "sched_switch"))
		process_sched_switch_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_stat_runtime"))
		process_sched_runtime_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup_new"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_fork"))
		process_sched_fork_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_exit"))
		process_sched_exit_event(event, cpu, timestamp, thread);
}

static int
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread;
	u64 ip = event->ip.ip;
	u64 timestamp = -1;
	u32 cpu = -1;
	u64 period = 1;
	void *more_data = event->ip.__more_data;

	if (!(sample_type & PERF_SAMPLE_RAW))
		return 0;

	thread = threads__findnew(event->ip.pid, &threads, &last_match);

	if (sample_type & PERF_SAMPLE_TIME) {
		timestamp = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		cpu = *(u32 *)more_data;
		more_data += sizeof(u32);
		more_data += sizeof(u32); /* reserved */
	}

	if (sample_type & PERF_SAMPLE_PERIOD) {
		period = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->header.misc,
		event->ip.pid, event->ip.tid,
		(void *)(long)ip,
		(long long)period);

	if (thread == NULL) {
		eprintf("problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	process_raw_event(event, more_data, cpu, timestamp, thread);

	return 0;
}

static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
	trace_event(event);

	nr_events++;
	switch (event->header.type) {
	case PERF_RECORD_MMAP:
		return 0;
	case PERF_RECORD_LOST:
		nr_lost_chunks++;
		nr_lost_events += event->lost.lost;
		return 0;

	case PERF_RECORD_COMM:
		return process_comm_event(event, offset, head);

	case PERF_RECORD_EXIT ... PERF_RECORD_READ:
		return 0;

	case PERF_RECORD_SAMPLE:
		return process_sample_event(event, offset, head);

	case PERF_RECORD_MAX:
	default:
		return -1;
	}

	return 0;
}

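/*
 * The perf.data stream is consumed through a sliding mmap window of
 * mmap_window pages: whenever the next record would cross the end of
 * the window, the mapping is shifted forward by whole pages and the
 * file is re-mapped at the new offset.
 */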
static int read_events(void)
{
	int ret, rc = EXIT_FAILURE;
	unsigned long offset = 0;
	unsigned long head = 0;
	struct stat perf_stat;
	event_t *event;
	uint32_t size;
	char *buf;

	register_idle_thread(&threads, &last_match);

	input = open(input_name, O_RDONLY);
	if (input < 0) {
		perror("failed to open file");
		exit(-1);
	}

	ret = fstat(input, &perf_stat);
	if (ret < 0) {
		perror("failed to stat file");
		exit(-1);
	}

	if (!perf_stat.st_size) {
		fprintf(stderr, "zero-sized file, nothing to do!\n");
		exit(0);
	}
	header = perf_header__read(input);
	head = header->data_offset;
	sample_type = perf_header__sample_type(header);

	if (!(sample_type & PERF_SAMPLE_RAW))
		die("No trace sample to read. Did you call perf record "
		    "without -R?");

	if (load_kernel() < 0) {
		perror("failed to load kernel symbols");
		return EXIT_FAILURE;
	}

remap:
	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
			   MAP_SHARED, input, offset);
	if (buf == MAP_FAILED) {
		perror("failed to mmap file");
		exit(-1);
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (!size)
		size = 8;

	if (head + event->header.size >= page_size * mmap_window) {
		unsigned long shift = page_size * (head / page_size);
		int res;

		res = munmap(buf, page_size * mmap_window);
		assert(res == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;


	if (!size || process_event(event, offset, head) < 0) {

		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */

		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head < (unsigned long)perf_stat.st_size)
		goto more;

	rc = EXIT_SUCCESS;
	close(input);

	return rc;
}

static void print_bad_events(void)
{
	if (nr_unordered_timestamps && nr_timestamps) {
		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
			nr_unordered_timestamps, nr_timestamps);
	}
	if (nr_lost_events && nr_events) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)nr_lost_events/(double)nr_events*100.0,
			nr_lost_events, nr_events, nr_lost_chunks);
	}
	if (nr_state_machine_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% state machine bugs (%ld out of %ld)",
			(double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
			nr_state_machine_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
	if (nr_context_switch_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
			nr_context_switch_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}

static void __cmd_lat(void)
{
	struct rb_node *next;

	setup_pager();
	read_events();
	sort_lat();

	printf("\n -----------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms |\n");
	printf(" -----------------------------------------------------------------------------------------\n");

	next = rb_first(&sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(work_list);
		next = rb_next(next);
	}

	printf(" -----------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9Ld |\n",
		(double)all_runtime/1e6, all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events();
	printf("\n");
}

static struct trace_sched_handler map_ops  = {
	.wakeup_event		= NULL,
	.switch_event		= map_switch_event,
	.runtime_event		= NULL,
	.fork_event		= NULL,
};

static void __cmd_map(void)
{
	max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	setup_pager();
	read_events();
	print_bad_events();
}

static void __cmd_replay(void)
{
	unsigned long i;

	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	read_events();

	printf("nr_run_events:        %ld\n", nr_run_events);
	printf("nr_sleep_events:      %ld\n", nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printf("target-less wakeups:  %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printf("run atoms optimized: %ld\n",
			nr_run_events_optimized);

	print_task_traces();
	add_cross_task_wakeups();

	create_tasks();
	printf("------------------------------------------------------------\n");
	for (i = 0; i < replay_repeat; i++)
		run_one_test();
}


static const char * const sched_usage[] = {
	"perf sched [<options>] {record|latency|map|replay|trace}",
	NULL
};

static const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const latency_usage[] = {
	"perf sched latency [<options>]",
	NULL
};

static const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const replay_usage[] = {
	"perf sched replay [<options>]",
	NULL
};

static const struct option replay_options[] = {
	OPT_INTEGER('r', "repeat", &replay_repeat,
		    "repeat the workload replay N times (-1: infinite)"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(latency_usage, latency_options);
		}
	}

	free(str);

	sort_dimension__add((char *)"pid", &cmp_pid);
}

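/*
 * 'perf sched record' is a thin wrapper around 'perf record': the
 * fixed argument list below enables system-wide raw sampling of the
 * relevant sched tracepoints; any extra user arguments are appended.
 */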
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-M",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "sched:sched_switch:r",
	"-e", "sched:sched_stat_wait:r",
	"-e", "sched:sched_stat_sleep:r",
	"-e", "sched:sched_stat_iowait:r",
	"-e", "sched:sched_stat_runtime:r",
	"-e", "sched:sched_process_exit:r",
	"-e", "sched:sched_process_fork:r",
	"-e", "sched:sched_wakeup:r",
	"-e", "sched:sched_migrate_task:r",
};

static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

int cmd_sched(int argc, const char **argv, const char *prefix __used)
{
	symbol__init();
	page_size = getpagesize();

	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		trace_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting();
		__cmd_lat();
	} else if (!strcmp(argv[0], "map")) {
		trace_handler = &map_ops;
		setup_sorting();
		__cmd_map();
	} else if (!strncmp(argv[0], "rep", 3)) {
		trace_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		__cmd_replay();
	} else if (!strcmp(argv[0], "trace")) {
		/*
		 * Aliased to 'perf trace' for now:
		 */
		return cmd_trace(argc, argv, prefix);
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}