/* tools/perf/builtin-sched.c */
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/data_map.h"

#include <sys/types.h>
#include <sys/prctl.h>

#include <semaphore.h>
#include <pthread.h>
#include <math.h>

static char const		*input_name = "perf.data";

static struct perf_header	*header;
static u64			sample_type;

static char			default_sort_order[] = "avg, max, switch, runtime";
static char			*sort_order = default_sort_order;

static int			profile_cpu = -1;

#define PR_SET_NAME		15               /* Set process name */
#define MAX_CPUS		4096

static u64			run_measurement_overhead;
static u64			sleep_measurement_overhead;

#define COMM_LEN		20
#define SYM_LEN			129

#define MAX_PID			65536

static unsigned long		nr_tasks;

struct sched_atom;

struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

	pthread_t		thread;
	sem_t			sleep_sem;

	sem_t			ready_for_work;
	sem_t			work_done_sem;

	u64			cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type	type;
	u64			timestamp;
	u64			duration;
	unsigned long		nr;
	int			specific_wait;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};

static struct task_desc		*pid_to_task[MAX_PID];

static struct task_desc		**tasks;

static pthread_mutex_t		start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static u64			start_time;

static pthread_mutex_t		work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long		nr_run_events;
static unsigned long		nr_sleep_events;
static unsigned long		nr_wakeup_events;

static unsigned long		nr_sleep_corrections;
static unsigned long		nr_run_events_optimized;

static unsigned long		targetless_wakeups;
static unsigned long		multitarget_wakeups;

static u64			cpu_usage;
static u64			runavg_cpu_usage;
static u64			parent_cpu_usage;
static u64			runavg_parent_cpu_usage;

static unsigned long		nr_runs;
static u64			sum_runtime;
static u64			sum_fluct;
static u64			run_avg;

static unsigned long		replay_repeat = 10;
static unsigned long		nr_timestamps;
static unsigned long		nr_unordered_timestamps;
static unsigned long		nr_state_machine_bugs;
static unsigned long		nr_context_switch_bugs;
static unsigned long		nr_events;
static unsigned long		nr_lost_chunks;
static unsigned long		nr_lost_events;

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

struct work_atom {
	struct list_head	list;
	enum thread_state	state;
	u64			sched_out_time;
	u64			wake_up_time;
	u64			sched_in_time;
	u64			runtime;
};

struct work_atoms {
	struct list_head	work_list;
	struct thread		*thread;
	struct rb_node		node;
	u64			max_lat;
	u64			total_lat;
	u64			nb_atoms;
	u64			total_runtime;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

static struct rb_root		atom_root, sorted_atom_root;

static u64			all_runtime;
static u64			all_count;


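/*
 * Timing helpers for the 'replay' mode: get_nsecs() reads the monotonic
 * clock, burn_nsecs() spins for a given duration, sleep_nsecs() blocks in
 * nanosleep(), and the two calibrate_*() functions measure the fixed
 * overhead of those operations so replayed runtimes can be corrected.
 */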
static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void burn_nsecs(u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 999999999;
	ts.tv_sec = nsecs / 999999999;

	nanosleep(&ts, NULL);
}

static void calibrate_run_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	run_measurement_overhead = min_delta;

	printf("run measurement overhead: %Ld nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
}

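/*
 * Builders for the per-task event lists used by 'perf sched replay':
 * each task gets an ordered array of sched_atoms (run/sleep/wakeup),
 * and register_pid() creates the task_desc the first time a PID is seen.
 */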
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

static void
add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * to it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	nr_run_events++;
}

static void
add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
		       struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	nr_wakeup_events++;
}

static void
add_sched_event_sleep(struct task_desc *task, u64 timestamp,
		      u64 task_state __used)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	nr_sleep_events++;
}

static struct task_desc *register_pid(unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;
	nr_tasks++;
	tasks = realloc(tasks, nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!tasks);
	tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

	return task;
}


static void print_task_traces(void)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(void)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < nr_tasks; i++) {
		task1 = tasks[i];
		j = i + 1;
		if (j == nr_tasks)
			j = 0;
		task2 = tasks[j];
		add_sched_event_wakeup(task1, 0, task2);
	}
}

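/*
 * Execute one synthetic atom during replay: RUN atoms burn CPU for the
 * recorded duration, SLEEP atoms wait on the atom's semaphore (if any),
 * and WAKEUP atoms post the wakee's semaphore.
 */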
static void
process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
{
	int ret = 0;
	u64 now;
	long long delta;

	now = get_nsecs();
	delta = start_time + atom->timestamp - now;

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(atom->duration);
		break;
	case SCHED_EVENT_SLEEP:
		if (atom->wait_sem)
			ret = sem_wait(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_WAKEUP:
		if (atom->wait_sem)
			ret = sem_post(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_MIGRATION:
		break;
	default:
		BUG_ON(1);
	}
}

static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum =  ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}

static u64 get_cpu_usage_nsec_self(void)
{
	char filename [] = "/proc/1234567890/sched";
	unsigned long msecs, nsecs;
	char *line = NULL;
	u64 total = 0;
	size_t len = 0;
	ssize_t chars;
	FILE *file;
	int ret;

	sprintf(filename, "/proc/%d/sched", getpid());
	file = fopen(filename, "r");
	BUG_ON(!file);

	while ((chars = getline(&line, &len, file)) != -1) {
		ret = sscanf(line, "se.sum_exec_runtime : %ld.%06ld\n",
			&msecs, &nsecs);
		if (ret == 2) {
			total = msecs*1e6 + nsecs;
			break;
		}
	}
	if (line)
		free(line);
	fclose(file);

	return total;
}

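/*
 * Replay worker: one thread per recorded task. The two mutexes act as
 * start/stop barriers - the parent holds start_work_mutex while workers
 * line up on it, then releases it to start a measurement run, and uses
 * work_done_wait_mutex the same way to collect them afterwards.
 */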
static void *thread_func(void *ctx)
{
	struct task_desc *this_task = ctx;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);

again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self();

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self();
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;

	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}

static void create_tasks(void)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
	BUG_ON(err);
	err = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);
		BUG_ON(err);
	}
}

static void wait_for_tasks(void)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();
	cpu_usage = 0;
	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
				   parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}

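/*
 * One replay iteration: run all tasks, then track the total runtime, a
 * decaying run average and the measured vs. parent-observed CPU usage.
 */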
static void run_one_test(void)
{
	u64 T0, T1, delta, avg_delta, fluct, std_dev;

	T0 = get_nsecs();
	wait_for_tasks();
	T1 = get_nsecs();

	delta = T1 - T0;
	sum_runtime += delta;
	nr_runs++;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sum_fluct += fluct;
	std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
	if (!run_avg)
		run_avg = delta;
	run_avg = (run_avg*9 + delta)/10;

	printf("#%-3ld: %0.3f, ",
		nr_runs, (double)delta/1000000.0);

	printf("ravg: %0.2f, ",
		(double)run_avg/1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)parent_cpu_usage/1e6,
		(double)runavg_parent_cpu_usage/1e6);
#endif

	printf("\n");

	if (nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;
}

static void test_calibrations(void)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(1e6);
	T1 = get_nsecs();

	printf("the run test took %Ld nsecs\n", T1-T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();

	printf("the sleep test took %Ld nsecs\n", T1-T0);
}

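/*
 * Raw tracepoint decoding: FILL_FIELD/FILL_ARRAY pull individual fields
 * out of the raw sample payload by name, and the trace_*_event structs
 * below mirror the sched tracepoint formats this tool consumes.
 */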
struct raw_event_sample {
	u32 size;
	char data[0];
};

#define FILL_FIELD(ptr, field, event, data)	\
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data)			\
do {								\
	void *__array = raw_field_ptr(event, #array, data);	\
	memcpy(ptr.array, __array, sizeof(ptr.array));		\
} while(0)

#define FILL_COMMON_FIELDS(ptr, event, data)			\
do {								\
	FILL_FIELD(ptr, common_type, event, data);		\
	FILL_FIELD(ptr, common_flags, event, data);		\
	FILL_FIELD(ptr, common_preempt_count, event, data);	\
	FILL_FIELD(ptr, common_pid, event, data);		\
	FILL_FIELD(ptr, common_tgid, event, data);		\
} while (0)



struct trace_switch_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char prev_comm[16];
	u32 prev_pid;
	u32 prev_prio;
	u64 prev_state;
	char next_comm[16];
	u32 next_pid;
	u32 next_prio;
};

struct trace_runtime_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u64 runtime;
	u64 vruntime;
};

struct trace_wakeup_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;

	u32 prio;
	u32 success;
	u32 cpu;
};

struct trace_fork_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char parent_comm[16];
	u32 parent_pid;
	char child_comm[16];
	u32 child_pid;
};

struct trace_migrate_task_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;

	u32 prio;
	u32 cpu;
};

struct trace_sched_handler {
	void (*switch_event)(struct trace_switch_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*runtime_event)(struct trace_runtime_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);

	void (*wakeup_event)(struct trace_wakeup_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*fork_event)(struct trace_fork_event *,
			   struct event *,
			   int cpu,
			   u64 timestamp,
			   struct thread *thread);

	void (*migrate_task_event)(struct trace_migrate_task_event *,
				   struct event *,
				   int cpu,
				   u64 timestamp,
				   struct thread *thread);
};


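/*
 * 'replay' handlers: convert recorded wakeup/switch/fork events into the
 * synthetic per-task atom lists defined above.
 */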
static void
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    struct event *event,
		    int cpu __used,
		    u64 timestamp __used,
		    struct thread *thread __used)
{
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", event);

		printf(" ... pid %d woke up %s/%d\n",
			wakeup_event->common_pid,
			wakeup_event->comm,
			wakeup_event->pid);
	}

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, timestamp, wakee);
}

static u64 cpu_last_switched[MAX_CPUS];

static void
replay_switch_event(struct trace_switch_event *switch_event,
		    struct event *event,
		    int cpu,
		    u64 timestamp,
		    struct thread *thread __used)
{
	struct task_desc *prev, *next;
	u64 timestamp0;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", event);

	if (cpu >= MAX_CPUS || cpu < 0)
		return;

	timestamp0 = cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	if (verbose) {
		printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
			switch_event->prev_comm, switch_event->prev_pid,
			switch_event->next_comm, switch_event->next_pid,
			delta);
	}

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
}


static void
replay_fork_event(struct trace_fork_event *fork_event,
		  struct event *event,
		  int cpu __used,
		  u64 timestamp __used,
		  struct thread *thread __used)
{
	if (verbose) {
		printf("sched_fork event %p\n", event);
		printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
		printf("...  child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
	}
	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);
}

static struct trace_sched_handler replay_ops = {
	.wakeup_event		= replay_wakeup_event,
	.switch_event		= replay_switch_event,
	.fork_event		= replay_fork_event,
};

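/*
 * 'latency' bookkeeping: per-thread work_atoms live in an rb-tree that is
 * ordered by a caller-supplied list of sort dimensions (pid by default,
 * user-selectable via -s for the final output).
 */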
struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(cmp_pid);

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}

static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			 struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void thread_atoms_insert(struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms)
		die("No memory");

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&atom_root, atoms, &cmp_pid);
}

static void
latency_fork_event(struct trace_fork_event *fork_event __used,
		   struct event *event __used,
		   int cpu __used,
		   u64 timestamp __used,
		   struct thread *thread __used)
{
	/* should insert the newcomer */
}

__used
static char sched_out_state(struct trace_switch_event *switch_event)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[switch_event->prev_state];
}

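/*
 * Atom life cycle helpers: a work_atom is opened when a thread is switched
 * out, optionally marked runnable on wakeup, and closed when the thread is
 * switched back in, at which point the wakeup-to-schedule-in delay is
 * accumulated into the thread's totals.
 */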
static void
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom)
		die("No memory");

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat)
		atoms->max_lat = delta;
	atoms->nb_atoms++;
}

FW
1027static void
1028latency_switch_event(struct trace_switch_event *switch_event,
1029 struct event *event __used,
ea92ed5a 1030 int cpu,
cdce9d73
FW
1031 u64 timestamp,
1032 struct thread *thread __used)
1033{
39aeb52f 1034 struct work_atoms *out_events, *in_events;
cdce9d73 1035 struct thread *sched_out, *sched_in;
ea92ed5a
IM
1036 u64 timestamp0;
1037 s64 delta;
1038
39aeb52f 1039 BUG_ON(cpu >= MAX_CPUS || cpu < 0);
ea92ed5a
IM
1040
1041 timestamp0 = cpu_last_switched[cpu];
1042 cpu_last_switched[cpu] = timestamp;
1043 if (timestamp0)
1044 delta = timestamp - timestamp0;
1045 else
1046 delta = 0;
1047
1048 if (delta < 0)
1049 die("hm, delta: %Ld < 0 ?\n", delta);
1050
cdce9d73 1051
d5b889f2
ACM
1052 sched_out = threads__findnew(switch_event->prev_pid);
1053 sched_in = threads__findnew(switch_event->next_pid);
cdce9d73 1054
39aeb52f 1055 out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
1056 if (!out_events) {
1057 thread_atoms_insert(sched_out);
1058 out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
1059 if (!out_events)
1060 die("out-event: Internal tree error");
1061 }
1062 add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);
1063
1064 in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
1065 if (!in_events) {
b1ffe8f3 1066 thread_atoms_insert(sched_in);
39aeb52f 1067 in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
1068 if (!in_events)
1069 die("in-event: Internal tree error");
		/*
		 * Task came in that we have not heard about yet,
		 * add in an initial atom in runnable state:
		 */
		add_sched_out_event(in_events, 'R', timestamp);
	}
	add_sched_in_event(in_events, timestamp);
}

static void
latency_runtime_event(struct trace_runtime_event *runtime_event,
		     struct event *event __used,
		     int cpu,
		     u64 timestamp,
		     struct thread *this_thread __used)
{
	struct thread *thread = threads__findnew(runtime_event->pid);
	struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		thread_atoms_insert(thread);
		atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
		if (!atoms)
			die("in-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);
	}

	add_runtime_event(atoms, runtime_event->runtime, timestamp);
}

static void
latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
		     struct event *__event __used,
		     int cpu __used,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;

	/* Note for later, it may be interesting to observe the failing cases */
	if (!wakeup_event->success)
		return;

	wakee = threads__findnew(wakeup_event->pid);
	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(wakee);
		atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
		if (!atoms)
			die("wakeup-event: Internal tree error");
		add_sched_out_event(atoms, 'S', timestamp);
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are only looking at only one, so don't
	 * make useless noise.
	 */
	if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		nr_state_machine_bugs++;

	nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		nr_unordered_timestamps++;
		return;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
}

static void
latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
		     struct event *__event __used,
		     int cpu __used,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (profile_cpu == -1)
		return;

	migrant = threads__findnew(migrate_task_event->pid);
	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(migrant);
		register_pid(migrant->pid, migrant->comm);
		atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
		if (!atoms)
			die("migration-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		nr_unordered_timestamps++;
}

static struct trace_sched_handler lat_ops = {
	.wakeup_event		= latency_wakeup_event,
	.switch_event		= latency_switch_event,
	.runtime_event		= latency_runtime_event,
	.fork_event		= latency_fork_event,
	.migrate_task_event	= latency_migrate_task_event,
};

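/*
 * Print one row of the latency table for a thread; idle (swapper) threads
 * are skipped and their numbers excluded from the totals.
 */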
static void output_lat_thread(struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(work_list->thread->comm, "swapper"))
		return;

	all_runtime += work_list->total_runtime;
	all_count += work_list->nb_atoms;

	ret = printf("  %s:%d ", work_list->thread->comm, work_list->thread->pid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;

	printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms |\n",
		(double)work_list->total_runtime / 1e6,
		work_list->nb_atoms, (double)avg / 1e6,
		(double)work_list->max_lat / 1e6);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread->pid < r->thread->pid)
		return -1;
	if (l->thread->pid > r->thread->pid)
		return 1;

	return 0;
}

static struct sort_dimension pid_sort_dimension = {
	.name			= "pid",
	.cmp			= pid_cmp,
};

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static struct sort_dimension avg_sort_dimension = {
	.name			= "avg",
	.cmp			= avg_cmp,
};

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static struct sort_dimension max_sort_dimension = {
	.name			= "max",
	.cmp			= max_cmp,
};

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static struct sort_dimension switch_sort_dimension = {
	.name			= "switch",
	.cmp			= switch_cmp,
};

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

static struct sort_dimension runtime_sort_dimension = {
	.name			= "runtime",
	.cmp			= runtime_cmp,
};

static struct sort_dimension *available_sorts[] = {
	&pid_sort_dimension,
	&avg_sort_dimension,
	&max_sort_dimension,
	&switch_sort_dimension,
	&runtime_sort_dimension,
};

#define NB_AVAILABLE_SORTS	(int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))

static LIST_HEAD(sort_list);

static int sort_dimension__add(const char *tok, struct list_head *list)
{
	int i;

	for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}

static void setup_sorting(void);

static void sort_lat(void)
{
	struct rb_node *node;

	for (;;) {
		struct work_atoms *data;
		node = rb_first(&atom_root);
		if (!node)
			break;

		rb_erase(node, &atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sorted_atom_root, data, &sort_list);
	}
}

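/*
 * Raw event demultiplexing: the process_sched_*_event() functions below
 * decode a raw sample into the matching trace_*_event struct and forward
 * it to whichever trace_sched_handler (replay, latency or map) is active.
 */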
static struct trace_sched_handler *trace_handler;

static void
process_sched_wakeup_event(struct raw_event_sample *raw,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_wakeup_event wakeup_event;

	FILL_COMMON_FIELDS(wakeup_event, event, raw->data);

	FILL_ARRAY(wakeup_event, comm, event, raw->data);
	FILL_FIELD(wakeup_event, pid, event, raw->data);
	FILL_FIELD(wakeup_event, prio, event, raw->data);
	FILL_FIELD(wakeup_event, success, event, raw->data);
	FILL_FIELD(wakeup_event, cpu, event, raw->data);

	if (trace_handler->wakeup_event)
		trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
}

/*
 * Track the current task - that way we can know whether there's any
 * weird events, such as a task being switched away that is not current.
 */
static int max_cpu;

static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };

static struct thread *curr_thread[MAX_CPUS];

static char next_shortname1 = 'A';
static char next_shortname2 = '0';

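/*
 * 'map' mode: print one row per context switch, with a column per CPU.
 * Each thread gets a two-character shortname; '*' marks the CPU on which
 * the current switch happened.
 */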
static void
map_switch_event(struct trace_switch_event *switch_event,
		 struct event *event __used,
		 int this_cpu,
		 u64 timestamp,
		 struct thread *thread __used)
{
	struct thread *sched_out, *sched_in;
	int new_shortname;
	u64 timestamp0;
	s64 delta;
	int cpu;

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > max_cpu)
		max_cpu = this_cpu;

	timestamp0 = cpu_last_switched[this_cpu];
	cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);


	sched_out = threads__findnew(switch_event->prev_pid);
	sched_in = threads__findnew(switch_event->next_pid);

	curr_thread[this_cpu] = sched_in;

	printf("  ");

	new_shortname = 0;
	if (!sched_in->shortname[0]) {
		sched_in->shortname[0] = next_shortname1;
		sched_in->shortname[1] = next_shortname2;

		if (next_shortname1 < 'Z') {
			next_shortname1++;
		} else {
			next_shortname1='A';
			if (next_shortname2 < '9') {
				next_shortname2++;
			} else {
				next_shortname2='0';
			}
		}
		new_shortname = 1;
	}

	for (cpu = 0; cpu <= max_cpu; cpu++) {
		if (cpu != this_cpu)
			printf(" ");
		else
			printf("*");

		if (curr_thread[cpu]) {
			if (curr_thread[cpu]->pid)
				printf("%2s ", curr_thread[cpu]->shortname);
			else
				printf(".  ");
		} else
			printf("   ");
	}

	printf("  %12.6f secs ", (double)timestamp/1e9);
	if (new_shortname) {
		printf("%s => %s:%d\n",
			sched_in->shortname, sched_in->comm, sched_in->pid);
	} else {
		printf("\n");
	}
}


static void
process_sched_switch_event(struct raw_event_sample *raw,
			   struct event *event,
			   int this_cpu,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_switch_event switch_event;

	FILL_COMMON_FIELDS(switch_event, event, raw->data);

	FILL_ARRAY(switch_event, prev_comm, event, raw->data);
	FILL_FIELD(switch_event, prev_pid, event, raw->data);
	FILL_FIELD(switch_event, prev_prio, event, raw->data);
	FILL_FIELD(switch_event, prev_state, event, raw->data);
	FILL_ARRAY(switch_event, next_comm, event, raw->data);
	FILL_FIELD(switch_event, next_pid, event, raw->data);
	FILL_FIELD(switch_event, next_prio, event, raw->data);

	if (curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (curr_pid[this_cpu] != switch_event.prev_pid)
			nr_context_switch_bugs++;
	}
	if (trace_handler->switch_event)
		trace_handler->switch_event(&switch_event, event, this_cpu, timestamp, thread);

	curr_pid[this_cpu] = switch_event.next_pid;
}

static void
process_sched_runtime_event(struct raw_event_sample *raw,
			    struct event *event,
			    int cpu __used,
			    u64 timestamp __used,
			    struct thread *thread __used)
{
	struct trace_runtime_event runtime_event;

	FILL_ARRAY(runtime_event, comm, event, raw->data);
	FILL_FIELD(runtime_event, pid, event, raw->data);
	FILL_FIELD(runtime_event, runtime, event, raw->data);
	FILL_FIELD(runtime_event, vruntime, event, raw->data);

	if (trace_handler->runtime_event)
		trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);
}

static void
process_sched_fork_event(struct raw_event_sample *raw,
			 struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	struct trace_fork_event fork_event;

	FILL_COMMON_FIELDS(fork_event, event, raw->data);

	FILL_ARRAY(fork_event, parent_comm, event, raw->data);
	FILL_FIELD(fork_event, parent_pid, event, raw->data);
	FILL_ARRAY(fork_event, child_comm, event, raw->data);
	FILL_FIELD(fork_event, child_pid, event, raw->data);

	if (trace_handler->fork_event)
		trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
}

static void
process_sched_exit_event(struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	if (verbose)
		printf("sched_exit event %p\n", event);
}

static void
process_sched_migrate_task_event(struct raw_event_sample *raw,
				 struct event *event,
				 int cpu __used,
				 u64 timestamp __used,
				 struct thread *thread __used)
{
	struct trace_migrate_task_event migrate_task_event;

	FILL_COMMON_FIELDS(migrate_task_event, event, raw->data);

	FILL_ARRAY(migrate_task_event, comm, event, raw->data);
	FILL_FIELD(migrate_task_event, pid, event, raw->data);
	FILL_FIELD(migrate_task_event, prio, event, raw->data);
	FILL_FIELD(migrate_task_event, cpu, event, raw->data);

	if (trace_handler->migrate_task_event)
		trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread);
}

static void
process_raw_event(event_t *raw_event __used, u32 size, void *data,
		  int cpu, u64 timestamp, struct thread *thread)
{
	struct raw_event_sample *raw;
	struct event *event;
	int type;

	raw = malloc_or_die(sizeof(*raw)+size);
	raw->size = size;
	memcpy(raw->data, data, size);

	type = trace_parse_common_type(raw->data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "sched_switch"))
		process_sched_switch_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_stat_runtime"))
		process_sched_runtime_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup_new"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_fork"))
		process_sched_fork_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_exit"))
		process_sched_exit_event(event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_migrate_task"))
		process_sched_migrate_task_event(raw, event, cpu, timestamp, thread);
}

static int process_sample_event(event_t *event)
{
	struct sample_data data;
	struct thread *thread;

	if (!(sample_type & PERF_SAMPLE_RAW))
		return 0;

	memset(&data, 0, sizeof(data));
	data.time = -1;
	data.cpu = -1;
	data.period = -1;

	event__parse_sample(event, sample_type, &data);

	dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
		event->header.misc,
		data.pid, data.tid,
		(void *)(long)data.ip,
		(long long)data.period);

	thread = threads__findnew(data.pid);
	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (profile_cpu != -1 && profile_cpu != (int)data.cpu)
		return 0;

	process_raw_event(event, data.raw_size, data.raw_data, data.cpu,
			  data.time, thread);

	return 0;
}

static int process_lost_event(event_t *event __used)
{
	nr_lost_chunks++;
	nr_lost_events += event->lost.lost;

	return 0;
}

static int sample_type_check(u64 type)
{
	sample_type = type;

	if (!(sample_type & PERF_SAMPLE_RAW)) {
		fprintf(stderr,
			"No trace sample to read. Did you call perf record "
			"without -R?");
		return -1;
	}

	return 0;
}

static struct perf_file_handler file_handler = {
	.process_sample_event	= process_sample_event,
	.process_comm_event	= event__process_comm,
	.process_lost_event	= process_lost_event,
	.sample_type_check	= sample_type_check,
};

static int read_events(void)
{
	register_idle_thread();
	register_perf_file_handler(&file_handler);

	return mmap_dispatch_perf_file(&header, input_name, 0, 0,
				       &event__cwdlen, &event__cwd);
}

static void print_bad_events(void)
{
	if (nr_unordered_timestamps && nr_timestamps) {
		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
			nr_unordered_timestamps, nr_timestamps);
	}
	if (nr_lost_events && nr_events) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)nr_lost_events/(double)nr_events*100.0,
			nr_lost_events, nr_events, nr_lost_chunks);
	}
	if (nr_state_machine_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% state machine bugs (%ld out of %ld)",
			(double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
			nr_state_machine_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
	if (nr_context_switch_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
			nr_context_switch_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}

static void __cmd_lat(void)
{
	struct rb_node *next;

	setup_pager();
	read_events();
	sort_lat();

	printf("\n -----------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms |\n");
	printf(" -----------------------------------------------------------------------------------------\n");

	next = rb_first(&sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(work_list);
		next = rb_next(next);
	}

	printf(" -----------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9Ld |\n",
		(double)all_runtime/1e6, all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events();
	printf("\n");

}

static struct trace_sched_handler map_ops = {
	.wakeup_event		= NULL,
	.switch_event		= map_switch_event,
	.runtime_event		= NULL,
	.fork_event		= NULL,
};

static void __cmd_map(void)
{
	max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	setup_pager();
	read_events();
	print_bad_events();
}

static void __cmd_replay(void)
{
	unsigned long i;

	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	read_events();

	printf("nr_run_events:        %ld\n", nr_run_events);
	printf("nr_sleep_events:      %ld\n", nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printf("target-less wakeups:  %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printf("run atoms optimized: %ld\n",
			nr_run_events_optimized);

	print_task_traces();
	add_cross_task_wakeups();

	create_tasks();
	printf("------------------------------------------------------------\n");
	for (i = 0; i < replay_repeat; i++)
		run_one_test();
}


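/*
 * Command-line definitions. Typical usage (assuming a perf.data recorded
 * with 'perf sched record'):
 *
 *	perf sched record
 *	perf sched latency --sort max
 *	perf sched map
 *	perf sched replay -r 10
 */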
static const char * const sched_usage[] = {
	"perf sched [<options>] {record|latency|map|replay|trace}",
	NULL
};

static const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const latency_usage[] = {
	"perf sched latency [<options>]",
	NULL
};

static const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_INTEGER('C', "CPU", &profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const replay_usage[] = {
	"perf sched replay [<options>]",
	NULL
};

static const struct option replay_options[] = {
	OPT_INTEGER('r', "repeat", &replay_repeat,
		    "repeat the workload replay N times (-1: infinite)"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(latency_usage, latency_options);
		}
	}

	free(str);

	sort_dimension__add("pid", &cmp_pid);
}

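/*
 * 'perf sched record' is a thin wrapper around 'perf record' with the
 * scheduler tracepoints below enabled; any extra arguments are passed
 * through unchanged.
 */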
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-M",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "sched:sched_switch:r",
	"-e", "sched:sched_stat_wait:r",
	"-e", "sched:sched_stat_sleep:r",
	"-e", "sched:sched_stat_iowait:r",
	"-e", "sched:sched_stat_runtime:r",
	"-e", "sched:sched_process_exit:r",
	"-e", "sched:sched_process_fork:r",
	"-e", "sched:sched_wakeup:r",
	"-e", "sched:sched_migrate_task:r",
};

static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

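/*
 * Top-level entry point: dispatch on the subcommand and install the
 * matching trace_sched_handler before reading the events.
 */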
int cmd_sched(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	/*
	 * Aliased to 'perf trace' for now:
	 */
	if (!strcmp(argv[0], "trace"))
		return cmd_trace(argc, argv, prefix);

	symbol__init(0);
	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		trace_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting();
		__cmd_lat();
	} else if (!strcmp(argv[0], "map")) {
		trace_handler = &map_ops;
		setup_sorting();
		__cmd_map();
	} else if (!strncmp(argv[0], "rep", 3)) {
		trace_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		__cmd_replay();
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}