Commit | Line | Data |
---|---|---|
0a02ad93 IM |
1 | #include "builtin.h" |
2 | ||
3 | #include "util/util.h" | |
4 | #include "util/cache.h" | |
5 | #include "util/symbol.h" | |
6 | #include "util/thread.h" | |
7 | #include "util/header.h" | |
8 | ||
9 | #include "util/parse-options.h" | |
10 | ||
11 | #include "perf.h" | |
12 | #include "util/debug.h" | |
13 | ||
14 | #include "util/trace-event.h" | |
ec156764 | 15 | #include <sys/types.h> |
0a02ad93 | 16 | |
419ab0d6 FW |
17 | |
18 | #define MAX_CPUS 4096 | |
19 | ||
ec156764 IM |
20 | static char const *input_name = "perf.data"; |
21 | static int input; | |
22 | static unsigned long page_size; | |
23 | static unsigned long mmap_window = 32; | |
0a02ad93 | 24 | |
ec156764 | 25 | static unsigned long total_comm = 0; |
0a02ad93 | 26 | |
ec156764 IM |
27 | static struct rb_root threads; |
28 | static struct thread *last_match; | |
0a02ad93 | 29 | |
ec156764 IM |
30 | static struct perf_header *header; |
31 | static u64 sample_type; | |
0a02ad93 | 32 | |
419ab0d6 | 33 | static int replay_mode; |
cdce9d73 | 34 | static int lat_mode; |
419ab0d6 | 35 | |
0a02ad93 | 36 | |
ec156764 IM |
37 | /* |
38 | * Scheduler benchmarks | |
39 | */ | |
40 | #include <sys/resource.h> | |
41 | #include <sys/types.h> | |
42 | #include <sys/stat.h> | |
43 | #include <sys/time.h> | |
44 | #include <sys/prctl.h> | |
45 | ||
46 | #include <linux/unistd.h> | |
47 | ||
48 | #include <semaphore.h> | |
49 | #include <pthread.h> | |
50 | #include <signal.h> | |
51 | #include <values.h> | |
52 | #include <string.h> | |
53 | #include <unistd.h> | |
54 | #include <stdlib.h> | |
55 | #include <assert.h> | |
56 | #include <fcntl.h> | |
57 | #include <time.h> | |
58 | #include <math.h> | |
59 | ||
60 | #include <stdio.h> | |
61 | ||
62 | #define PR_SET_NAME 15 /* Set process name */ | |
63 | ||
64 | #define BUG_ON(x) assert(!(x)) | |
65 | ||
fbf94829 | 66 | #define DEBUG 0 |
ec156764 IM |
67 | |
68 | typedef unsigned long long nsec_t; | |
69 | ||
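| /* Overhead of the measurement primitives themselves; filled in by the calibrate_*_overhead() helpers below. */ |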
ec156764 IM |
70 | static nsec_t run_measurement_overhead; |
71 | static nsec_t sleep_measurement_overhead; | |
72 | ||
73 | static nsec_t get_nsecs(void) | |
74 | { | |
75 | struct timespec ts; | |
76 | ||
77 | clock_gettime(CLOCK_MONOTONIC, &ts); | |
78 | ||
79 | return ts.tv_sec * 1000000000ULL + ts.tv_nsec; | |
80 | } | |
81 | ||
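| /* burn_nsecs(): busy-loop for roughly 'nsecs', stopping early by run_measurement_overhead to cancel out the cost of the loop itself. */ |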
82 | static void burn_nsecs(nsec_t nsecs) | |
83 | { | |
84 | nsec_t T0 = get_nsecs(), T1; | |
85 | ||
86 | do { | |
87 | T1 = get_nsecs(); | |
88 | } while (T1 + run_measurement_overhead < T0 + nsecs); | |
89 | } | |
90 | ||
91 | static void sleep_nsecs(nsec_t nsecs) | |
92 | { | |
93 | struct timespec ts; | |
94 | ||
95 | ts.tv_nsec = nsecs % 1000000000; | |
96 | ts.tv_sec = nsecs / 1000000000; | |
97 | ||
98 | nanosleep(&ts, NULL); | |
99 | } | |
100 | ||
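| /* The two calibration helpers estimate the smallest observable cost of burn_nsecs(0) and of a short nanosleep() by taking the minimum over 10 trials. */ |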
101 | static void calibrate_run_measurement_overhead(void) | |
102 | { | |
103 | nsec_t T0, T1, delta, min_delta = 1000000000ULL; | |
104 | int i; | |
105 | ||
106 | for (i = 0; i < 10; i++) { | |
107 | T0 = get_nsecs(); | |
108 | burn_nsecs(0); | |
109 | T1 = get_nsecs(); | |
110 | delta = T1-T0; | |
111 | min_delta = min(min_delta, delta); | |
112 | } | |
113 | run_measurement_overhead = min_delta; | |
114 | ||
ad236fd2 | 115 | printf("run measurement overhead: %Ld nsecs\n", min_delta); |
ec156764 IM |
116 | } |
117 | ||
118 | static void calibrate_sleep_measurement_overhead(void) | |
119 | { | |
120 | nsec_t T0, T1, delta, min_delta = 1000000000ULL; | |
121 | int i; | |
122 | ||
123 | for (i = 0; i < 10; i++) { | |
124 | T0 = get_nsecs(); | |
125 | sleep_nsecs(10000); | |
126 | T1 = get_nsecs(); | |
127 | delta = T1-T0; | |
128 | min_delta = min(min_delta, delta); | |
129 | } | |
130 | min_delta -= 10000; | |
131 | sleep_measurement_overhead = min_delta; | |
132 | ||
ad236fd2 | 133 | printf("sleep measurement overhead: %Ld nsecs\n", min_delta); |
ec156764 IM |
134 | } |
135 | ||
136 | #define COMM_LEN 20 | |
137 | #define SYM_LEN 129 | |
138 | ||
139 | #define MAX_PID 65536 | |
140 | ||
141 | static unsigned long nr_tasks; | |
142 | ||
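| /* Per-task replay state: the list of synthetic events parsed from the trace, plus the worker thread and the semaphores used to coordinate it. */ |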
143 | struct sched_event; | |
144 | ||
145 | struct task_desc { | |
146 | unsigned long nr; | |
147 | unsigned long pid; | |
148 | char comm[COMM_LEN]; | |
149 | ||
150 | unsigned long nr_events; | |
151 | unsigned long curr_event; | |
152 | struct sched_event **events; | |
153 | ||
154 | pthread_t thread; | |
155 | sem_t sleep_sem; | |
156 | ||
157 | sem_t ready_for_work; | |
158 | sem_t work_done_sem; | |
159 | ||
160 | nsec_t cpu_usage; | |
161 | }; | |
162 | ||
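| /* Every trace record is reduced to one of three synthetic event types that the worker threads later replay. */ |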
163 | enum sched_event_type { | |
164 | SCHED_EVENT_RUN, | |
165 | SCHED_EVENT_SLEEP, | |
166 | SCHED_EVENT_WAKEUP, | |
167 | }; | |
168 | ||
169 | struct sched_event { | |
170 | enum sched_event_type type; | |
171 | nsec_t timestamp; | |
172 | nsec_t duration; | |
173 | unsigned long nr; | |
174 | int specific_wait; | |
175 | sem_t *wait_sem; | |
176 | struct task_desc *wakee; | |
177 | }; | |
178 | ||
179 | static struct task_desc *pid_to_task[MAX_PID]; | |
180 | ||
181 | static struct task_desc **tasks; | |
182 | ||
183 | static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER; | |
184 | static nsec_t start_time; | |
185 | ||
186 | static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER; | |
187 | ||
188 | static unsigned long nr_run_events; | |
189 | static unsigned long nr_sleep_events; | |
190 | static unsigned long nr_wakeup_events; | |
191 | ||
192 | static unsigned long nr_sleep_corrections; | |
193 | static unsigned long nr_run_events_optimized; | |
194 | ||
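| /* get_new_event(): allocate one more synthetic event and append it to the task's dynamically grown event array. */ |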
195 | static struct sched_event * | |
196 | get_new_event(struct task_desc *task, nsec_t timestamp) | |
197 | { | |
198 | struct sched_event *event = calloc(1, sizeof(*event)); | |
199 | unsigned long idx = task->nr_events; | |
200 | size_t size; | |
201 | ||
202 | event->timestamp = timestamp; | |
203 | event->nr = idx; | |
204 | ||
205 | task->nr_events++; | |
206 | size = sizeof(struct sched_event *) * task->nr_events; | |
207 | task->events = realloc(task->events, size); | |
208 | BUG_ON(!task->events); | |
209 | ||
210 | task->events[idx] = event; | |
211 | ||
212 | return event; | |
213 | } | |
214 | ||
215 | static struct sched_event *last_event(struct task_desc *task) | |
216 | { | |
217 | if (!task->nr_events) | |
218 | return NULL; | |
219 | ||
220 | return task->events[task->nr_events - 1]; | |
221 | } | |
222 | ||
223 | static void | |
fbf94829 | 224 | add_sched_event_run(struct task_desc *task, nsec_t timestamp, u64 duration) |
ec156764 IM |
225 | { |
226 | struct sched_event *event, *curr_event = last_event(task); | |
227 | ||
228 | /* | |
fbf94829 IM |
229 | * optimize an existing RUN event by merging this one |
230 | * to it: | |
231 | */ | |
ec156764 IM |
232 | if (curr_event && curr_event->type == SCHED_EVENT_RUN) { |
233 | nr_run_events_optimized++; | |
234 | curr_event->duration += duration; | |
235 | return; | |
236 | } | |
237 | ||
238 | event = get_new_event(task, timestamp); | |
239 | ||
240 | event->type = SCHED_EVENT_RUN; | |
241 | event->duration = duration; | |
242 | ||
243 | nr_run_events++; | |
244 | } | |
245 | ||
246 | static unsigned long targetless_wakeups; | |
247 | static unsigned long multitarget_wakeups; | |
248 | ||
249 | static void | |
250 | add_sched_event_wakeup(struct task_desc *task, nsec_t timestamp, | |
251 | struct task_desc *wakee) | |
252 | { | |
253 | struct sched_event *event, *wakee_event; | |
254 | ||
255 | event = get_new_event(task, timestamp); | |
256 | event->type = SCHED_EVENT_WAKEUP; | |
257 | event->wakee = wakee; | |
258 | ||
259 | wakee_event = last_event(wakee); | |
260 | if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) { | |
261 | targetless_wakeups++; | |
262 | return; | |
263 | } | |
264 | if (wakee_event->wait_sem) { | |
265 | multitarget_wakeups++; | |
266 | return; | |
267 | } | |
268 | ||
269 | wakee_event->wait_sem = calloc(1, sizeof(*wakee_event->wait_sem)); | |
270 | sem_init(wakee_event->wait_sem, 0, 0); | |
271 | wakee_event->specific_wait = 1; | |
272 | event->wait_sem = wakee_event->wait_sem; | |
273 | ||
274 | nr_wakeup_events++; | |
275 | } | |
276 | ||
277 | static void | |
278 | add_sched_event_sleep(struct task_desc *task, nsec_t timestamp, | |
ad236fd2 | 279 | u64 task_state __used) |
ec156764 IM |
280 | { |
281 | struct sched_event *event = get_new_event(task, timestamp); | |
282 | ||
283 | event->type = SCHED_EVENT_SLEEP; | |
284 | ||
285 | nr_sleep_events++; | |
286 | } | |
287 | ||
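| /* register_pid(): return the task_desc for a traced PID, creating it on first sight; new tasks start with an implicit sleep event. */ |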
288 | static struct task_desc *register_pid(unsigned long pid, const char *comm) | |
289 | { | |
290 | struct task_desc *task; | |
291 | ||
292 | BUG_ON(pid >= MAX_PID); | |
293 | ||
294 | task = pid_to_task[pid]; | |
295 | ||
296 | if (task) | |
297 | return task; | |
298 | ||
299 | task = calloc(1, sizeof(*task)); | |
300 | task->pid = pid; | |
301 | task->nr = nr_tasks; | |
302 | strcpy(task->comm, comm); | |
303 | /* | |
304 | * every task starts in sleeping state - this gets ignored | |
305 | * if there's no wakeup pointing to this sleep state: | |
306 | */ | |
307 | add_sched_event_sleep(task, 0, 0); | |
308 | ||
309 | pid_to_task[pid] = task; | |
310 | nr_tasks++; | |
311 | tasks = realloc(tasks, nr_tasks*sizeof(struct task_desc *)); | |
312 | BUG_ON(!tasks); | |
313 | tasks[task->nr] = task; | |
314 | ||
ad236fd2 IM |
315 | if (verbose) |
316 | printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm); | |
ec156764 IM |
317 | |
318 | return task; | |
319 | } | |
320 | ||
321 | ||
ec156764 IM |
322 | static void print_task_traces(void) |
323 | { | |
324 | struct task_desc *task; | |
325 | unsigned long i; | |
326 | ||
327 | for (i = 0; i < nr_tasks; i++) { | |
328 | task = tasks[i]; | |
ad236fd2 | 329 | printf("task %6ld (%20s:%10ld), nr_events: %ld\n", |
ec156764 IM |
330 | task->nr, task->comm, task->pid, task->nr_events); |
331 | } | |
332 | } | |
333 | ||
334 | static void add_cross_task_wakeups(void) | |
335 | { | |
336 | struct task_desc *task1, *task2; | |
337 | unsigned long i, j; | |
338 | ||
339 | for (i = 0; i < nr_tasks; i++) { | |
340 | task1 = tasks[i]; | |
341 | j = i + 1; | |
342 | if (j == nr_tasks) | |
343 | j = 0; | |
344 | task2 = tasks[j]; | |
345 | add_sched_event_wakeup(task1, 0, task2); | |
346 | } | |
347 | } | |
348 | ||
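| /* process_sched_event(): run on the worker threads; RUN events burn CPU for the recorded duration, SLEEP/WAKEUP pairs are modelled with semaphores. */ |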
349 | static void | |
fbf94829 | 350 | process_sched_event(struct task_desc *this_task __used, struct sched_event *event) |
ec156764 IM |
351 | { |
352 | int ret = 0; | |
353 | nsec_t now; | |
354 | long long delta; | |
355 | ||
356 | now = get_nsecs(); | |
357 | delta = start_time + event->timestamp - now; | |
358 | ||
ec156764 IM |
359 | switch (event->type) { |
360 | case SCHED_EVENT_RUN: | |
ec156764 IM |
361 | burn_nsecs(event->duration); |
362 | break; | |
363 | case SCHED_EVENT_SLEEP: | |
ec156764 IM |
364 | if (event->wait_sem) |
365 | ret = sem_wait(event->wait_sem); | |
366 | BUG_ON(ret); | |
367 | break; | |
368 | case SCHED_EVENT_WAKEUP: | |
ec156764 IM |
369 | if (event->wait_sem) |
370 | ret = sem_post(event->wait_sem); | |
371 | BUG_ON(ret); | |
372 | break; | |
373 | default: | |
374 | BUG_ON(1); | |
375 | } | |
376 | } | |
377 | ||
378 | static nsec_t get_cpu_usage_nsec_parent(void) | |
379 | { | |
380 | struct rusage ru; | |
381 | nsec_t sum; | |
382 | int err; | |
383 | ||
384 | err = getrusage(RUSAGE_SELF, &ru); | |
385 | BUG_ON(err); | |
386 | ||
387 | sum = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3; | |
388 | sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3; | |
389 | ||
390 | return sum; | |
391 | } | |
392 | ||
393 | static nsec_t get_cpu_usage_nsec_self(void) | |
394 | { | |
395 | char filename [] = "/proc/1234567890/sched"; | |
396 | unsigned long msecs, nsecs; | |
397 | char *line = NULL; | |
398 | nsec_t total = 0; | |
399 | size_t len = 0; | |
400 | ssize_t chars; | |
401 | FILE *file; | |
402 | int ret; | |
403 | ||
404 | sprintf(filename, "/proc/%d/sched", getpid()); | |
405 | file = fopen(filename, "r"); | |
406 | BUG_ON(!file); | |
407 | ||
408 | while ((chars = getline(&line, &len, file)) != -1) { | |
ec156764 IM |
409 | ret = sscanf(line, "se.sum_exec_runtime : %ld.%06ld\n", |
410 | &msecs, &nsecs); | |
411 | if (ret == 2) { | |
412 | total = msecs*1e6 + nsecs; | |
ec156764 IM |
413 | break; |
414 | } | |
415 | } | |
416 | if (line) | |
417 | free(line); | |
418 | fclose(file); | |
419 | ||
420 | return total; | |
421 | } | |
422 | ||
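| /* thread_func(): worker body - signal ready_for_work, block on start_work_mutex, replay the task's events while measuring its own CPU time, signal work_done_sem, then wait for the next iteration. */ |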
423 | static void *thread_func(void *ctx) | |
424 | { | |
425 | struct task_desc *this_task = ctx; | |
426 | nsec_t cpu_usage_0, cpu_usage_1; | |
427 | unsigned long i, ret; | |
428 | char comm2[22]; | |
429 | ||
ec156764 IM |
430 | sprintf(comm2, ":%s", this_task->comm); |
431 | prctl(PR_SET_NAME, comm2); | |
432 | ||
433 | again: | |
434 | ret = sem_post(&this_task->ready_for_work); | |
435 | BUG_ON(ret); | |
ec156764 IM |
436 | ret = pthread_mutex_lock(&start_work_mutex); |
437 | BUG_ON(ret); | |
438 | ret = pthread_mutex_unlock(&start_work_mutex); | |
439 | BUG_ON(ret); | |
ec156764 IM |
440 | |
441 | cpu_usage_0 = get_cpu_usage_nsec_self(); | |
442 | ||
443 | for (i = 0; i < this_task->nr_events; i++) { | |
444 | this_task->curr_event = i; | |
445 | process_sched_event(this_task, this_task->events[i]); | |
446 | } | |
447 | ||
448 | cpu_usage_1 = get_cpu_usage_nsec_self(); | |
449 | this_task->cpu_usage = cpu_usage_1 - cpu_usage_0; | |
450 | ||
ec156764 IM |
451 | ret = sem_post(&this_task->work_done_sem); |
452 | BUG_ON(ret); | |
ec156764 IM |
453 | |
454 | ret = pthread_mutex_lock(&work_done_wait_mutex); | |
455 | BUG_ON(ret); | |
456 | ret = pthread_mutex_unlock(&work_done_wait_mutex); | |
457 | BUG_ON(ret); | |
ec156764 IM |
458 | |
459 | goto again; | |
460 | } | |
461 | ||
462 | static void create_tasks(void) | |
463 | { | |
464 | struct task_desc *task; | |
465 | pthread_attr_t attr; | |
466 | unsigned long i; | |
467 | int err; | |
468 | ||
469 | err = pthread_attr_init(&attr); | |
470 | BUG_ON(err); | |
471 | err = pthread_attr_setstacksize(&attr, (size_t)(16*1024)); | |
472 | BUG_ON(err); | |
473 | err = pthread_mutex_lock(&start_work_mutex); | |
474 | BUG_ON(err); | |
475 | err = pthread_mutex_lock(&work_done_wait_mutex); | |
476 | BUG_ON(err); | |
477 | for (i = 0; i < nr_tasks; i++) { | |
478 | task = tasks[i]; | |
479 | sem_init(&task->sleep_sem, 0, 0); | |
480 | sem_init(&task->ready_for_work, 0, 0); | |
481 | sem_init(&task->work_done_sem, 0, 0); | |
482 | task->curr_event = 0; | |
483 | err = pthread_create(&task->thread, &attr, thread_func, task); | |
484 | BUG_ON(err); | |
485 | } | |
486 | } | |
487 | ||
488 | static nsec_t cpu_usage; | |
489 | static nsec_t runavg_cpu_usage; | |
490 | static nsec_t parent_cpu_usage; | |
491 | static nsec_t runavg_parent_cpu_usage; | |
492 | ||
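| /* wait_for_tasks(): drive one iteration - wait until every worker is ready, release them via start_work_mutex, collect their CPU usage and keep 9/10-decay running averages. */ |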
493 | static void wait_for_tasks(void) | |
494 | { | |
495 | nsec_t cpu_usage_0, cpu_usage_1; | |
496 | struct task_desc *task; | |
497 | unsigned long i, ret; | |
498 | ||
ec156764 | 499 | start_time = get_nsecs(); |
ec156764 IM |
500 | cpu_usage = 0; |
501 | pthread_mutex_unlock(&work_done_wait_mutex); | |
502 | ||
503 | for (i = 0; i < nr_tasks; i++) { | |
504 | task = tasks[i]; | |
505 | ret = sem_wait(&task->ready_for_work); | |
506 | BUG_ON(ret); | |
507 | sem_init(&task->ready_for_work, 0, 0); | |
508 | } | |
509 | ret = pthread_mutex_lock(&work_done_wait_mutex); | |
510 | BUG_ON(ret); | |
511 | ||
512 | cpu_usage_0 = get_cpu_usage_nsec_parent(); | |
513 | ||
514 | pthread_mutex_unlock(&start_work_mutex); | |
515 | ||
ec156764 IM |
516 | for (i = 0; i < nr_tasks; i++) { |
517 | task = tasks[i]; | |
518 | ret = sem_wait(&task->work_done_sem); | |
519 | BUG_ON(ret); | |
520 | sem_init(&task->work_done_sem, 0, 0); | |
521 | cpu_usage += task->cpu_usage; | |
522 | task->cpu_usage = 0; | |
523 | } | |
524 | ||
525 | cpu_usage_1 = get_cpu_usage_nsec_parent(); | |
526 | if (!runavg_cpu_usage) | |
527 | runavg_cpu_usage = cpu_usage; | |
528 | runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10; | |
529 | ||
530 | parent_cpu_usage = cpu_usage_1 - cpu_usage_0; | |
531 | if (!runavg_parent_cpu_usage) | |
532 | runavg_parent_cpu_usage = parent_cpu_usage; | |
533 | runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 + | |
534 | parent_cpu_usage)/10; | |
535 | ||
536 | ret = pthread_mutex_lock(&start_work_mutex); | |
537 | BUG_ON(ret); | |
538 | ||
539 | for (i = 0; i < nr_tasks; i++) { | |
540 | task = tasks[i]; | |
541 | sem_init(&task->sleep_sem, 0, 0); | |
542 | task->curr_event = 0; | |
543 | } | |
544 | } | |
545 | ||
546 | static int __cmd_sched(void); | |
547 | ||
548 | static void parse_trace(void) | |
549 | { | |
550 | __cmd_sched(); | |
551 | ||
ad236fd2 IM |
552 | printf("nr_run_events: %ld\n", nr_run_events); |
553 | printf("nr_sleep_events: %ld\n", nr_sleep_events); | |
554 | printf("nr_wakeup_events: %ld\n", nr_wakeup_events); | |
ec156764 IM |
555 | |
556 | if (targetless_wakeups) | |
ad236fd2 | 557 | printf("target-less wakeups: %ld\n", targetless_wakeups); |
ec156764 | 558 | if (multitarget_wakeups) |
ad236fd2 | 559 | printf("multi-target wakeups: %ld\n", multitarget_wakeups); |
ec156764 | 560 | if (nr_run_events_optimized) |
ad236fd2 | 561 | printf("run events optimized: %ld\n", |
ec156764 IM |
562 | nr_run_events_optimized); |
563 | } | |
564 | ||
565 | static unsigned long nr_runs; | |
566 | static nsec_t sum_runtime; | |
567 | static nsec_t sum_fluct; | |
568 | static nsec_t run_avg; | |
569 | ||
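| /* run_one_test(): time a single replay iteration and print the wall-clock delta, its running average and the measured CPU usage. */ |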
570 | static void run_one_test(void) | |
571 | { | |
572 | nsec_t T0, T1, delta, avg_delta, fluct, std_dev; | |
573 | ||
574 | T0 = get_nsecs(); | |
575 | wait_for_tasks(); | |
576 | T1 = get_nsecs(); | |
577 | ||
578 | delta = T1 - T0; | |
579 | sum_runtime += delta; | |
580 | nr_runs++; | |
581 | ||
582 | avg_delta = sum_runtime / nr_runs; | |
583 | if (delta < avg_delta) | |
584 | fluct = avg_delta - delta; | |
585 | else | |
586 | fluct = delta - avg_delta; | |
587 | sum_fluct += fluct; | |
588 | std_dev = sum_fluct / nr_runs / sqrt(nr_runs); | |
589 | if (!run_avg) | |
590 | run_avg = delta; | |
591 | run_avg = (run_avg*9 + delta)/10; | |
592 | ||
ad236fd2 | 593 | printf("#%-3ld: %0.3f, ", |
ec156764 IM |
594 | nr_runs, (double)delta/1000000.0); |
595 | ||
596 | #if 0 | |
ad236fd2 | 597 | printf("%0.2f +- %0.2f, ", |
ec156764 IM |
598 | (double)avg_delta/1e6, (double)std_dev/1e6); |
599 | #endif | |
ad236fd2 | 600 | printf("ravg: %0.2f, ", |
ec156764 IM |
601 | (double)run_avg/1e6); |
602 | ||
ad236fd2 | 603 | printf("cpu: %0.2f / %0.2f", |
ec156764 IM |
604 | (double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6); |
605 | ||
606 | #if 0 | |
607 | /* | |
fbf94829 IM |
608 | * rusage statistics done by the parent, these are less |
609 | * accurate than the sum_exec_runtime based statistics: | |
610 | */ | |
ad236fd2 | 611 | printf(" [%0.2f / %0.2f]", |
ec156764 IM |
612 | (double)parent_cpu_usage/1e6, |
613 | (double)runavg_parent_cpu_usage/1e6); | |
614 | #endif | |
615 | ||
ad236fd2 | 616 | printf("\n"); |
ec156764 IM |
617 | |
618 | if (nr_sleep_corrections) | |
ad236fd2 | 619 | printf(" (%ld sleep corrections)\n", nr_sleep_corrections); |
ec156764 IM |
620 | nr_sleep_corrections = 0; |
621 | } | |
622 | ||
623 | static void test_calibrations(void) | |
624 | { | |
625 | nsec_t T0, T1; | |
626 | ||
627 | T0 = get_nsecs(); | |
628 | burn_nsecs(1e6); | |
629 | T1 = get_nsecs(); | |
630 | ||
ad236fd2 | 631 | printf("the run test took %Ld nsecs\n", T1-T0); |
ec156764 IM |
632 | |
633 | T0 = get_nsecs(); | |
634 | sleep_nsecs(1e6); | |
635 | T1 = get_nsecs(); | |
636 | ||
ad236fd2 | 637 | printf("the sleep test took %Ld nsecs\n", T1-T0); |
ec156764 IM |
638 | } |
639 | ||
0a02ad93 IM |
640 | static int |
641 | process_comm_event(event_t *event, unsigned long offset, unsigned long head) | |
642 | { | |
643 | struct thread *thread; | |
644 | ||
645 | thread = threads__findnew(event->comm.pid, &threads, &last_match); | |
646 | ||
647 | dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", | |
648 | (void *)(offset + head), | |
649 | (void *)(long)(event->header.size), | |
650 | event->comm.comm, event->comm.pid); | |
651 | ||
652 | if (thread == NULL || | |
653 | thread__set_comm(thread, event->comm.comm)) { | |
654 | dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n"); | |
655 | return -1; | |
656 | } | |
657 | total_comm++; | |
658 | ||
659 | return 0; | |
660 | } | |
661 | ||
46538818 FW |
662 | |
663 | struct raw_event_sample { | |
664 | u32 size; | |
665 | char data[0]; | |
666 | }; | |
667 | ||
668 | #define FILL_FIELD(ptr, field, event, data) \ | |
669 | ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data) | |
670 | ||
671 | #define FILL_ARRAY(ptr, array, event, data) \ | |
672 | do { \ | |
673 | void *__array = raw_field_ptr(event, #array, data); \ | |
674 | memcpy(ptr.array, __array, sizeof(ptr.array)); \ | |
675 | } while(0) | |
676 | ||
677 | #define FILL_COMMON_FIELDS(ptr, event, data) \ | |
678 | do { \ | |
679 | FILL_FIELD(ptr, common_type, event, data); \ | |
680 | FILL_FIELD(ptr, common_flags, event, data); \ | |
681 | FILL_FIELD(ptr, common_preempt_count, event, data); \ | |
682 | FILL_FIELD(ptr, common_pid, event, data); \ | |
683 | FILL_FIELD(ptr, common_tgid, event, data); \ | |
684 | } while (0) | |
685 | ||
419ab0d6 FW |
686 | |
687 | ||
688 | struct trace_switch_event { | |
689 | u32 size; | |
690 | ||
691 | u16 common_type; | |
692 | u8 common_flags; | |
693 | u8 common_preempt_count; | |
694 | u32 common_pid; | |
695 | u32 common_tgid; | |
696 | ||
697 | char prev_comm[16]; | |
698 | u32 prev_pid; | |
699 | u32 prev_prio; | |
700 | u64 prev_state; | |
701 | char next_comm[16]; | |
702 | u32 next_pid; | |
703 | u32 next_prio; | |
704 | }; | |
705 | ||
706 | ||
fbf94829 IM |
707 | struct trace_wakeup_event { |
708 | u32 size; | |
709 | ||
710 | u16 common_type; | |
711 | u8 common_flags; | |
712 | u8 common_preempt_count; | |
713 | u32 common_pid; | |
714 | u32 common_tgid; | |
715 | ||
716 | char comm[16]; | |
717 | u32 pid; | |
718 | ||
719 | u32 prio; | |
720 | u32 success; | |
721 | u32 cpu; | |
722 | }; | |
723 | ||
419ab0d6 FW |
724 | struct trace_fork_event { |
725 | u32 size; | |
46538818 | 726 | |
419ab0d6 FW |
727 | u16 common_type; |
728 | u8 common_flags; | |
729 | u8 common_preempt_count; | |
730 | u32 common_pid; | |
731 | u32 common_tgid; | |
732 | ||
733 | char parent_comm[16]; | |
734 | u32 parent_pid; | |
735 | char child_comm[16]; | |
736 | u32 child_pid; | |
737 | }; | |
738 | ||
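| /* Per-mode callbacks: -r installs replay_ops, -l installs lat_ops (see cmd_sched()). */ |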
739 | struct trace_sched_handler { | |
740 | void (*switch_event)(struct trace_switch_event *, | |
741 | struct event *, | |
742 | int cpu, | |
743 | u64 timestamp, | |
744 | struct thread *thread); | |
745 | ||
746 | void (*wakeup_event)(struct trace_wakeup_event *, | |
747 | struct event *, | |
748 | int cpu, | |
749 | u64 timestamp, | |
750 | struct thread *thread); | |
751 | ||
752 | void (*fork_event)(struct trace_fork_event *, | |
753 | struct event *, | |
754 | int cpu, | |
755 | u64 timestamp, | |
756 | struct thread *thread); | |
757 | }; | |
46538818 | 758 | |
46538818 | 759 | |
419ab0d6 FW |
760 | static void |
761 | replay_wakeup_event(struct trace_wakeup_event *wakeup_event, | |
762 | struct event *event, | |
763 | int cpu __used, | |
764 | u64 timestamp __used, | |
765 | struct thread *thread __used) | |
766 | { | |
767 | struct task_desc *waker, *wakee; | |
fbf94829 | 768 | |
ad236fd2 IM |
769 | if (verbose) { |
770 | printf("sched_wakeup event %p\n", event); | |
fbf94829 | 771 | |
ad236fd2 | 772 | printf(" ... pid %d woke up %s/%d\n", |
419ab0d6 FW |
773 | wakeup_event->common_pid, |
774 | wakeup_event->comm, | |
775 | wakeup_event->pid); | |
ad236fd2 | 776 | } |
fbf94829 | 777 | |
419ab0d6 FW |
778 | waker = register_pid(wakeup_event->common_pid, "<unknown>"); |
779 | wakee = register_pid(wakeup_event->pid, wakeup_event->comm); | |
fbf94829 IM |
780 | |
781 | add_sched_event_wakeup(waker, timestamp, wakee); | |
ec156764 IM |
782 | } |
783 | ||
419ab0d6 | 784 | static unsigned long cpu_last_switched[MAX_CPUS]; |
fbf94829 IM |
785 | |
786 | static void | |
419ab0d6 FW |
787 | replay_switch_event(struct trace_switch_event *switch_event, |
788 | struct event *event, | |
789 | int cpu, | |
790 | u64 timestamp, | |
791 | struct thread *thread __used) | |
ec156764 | 792 | { |
fbf94829 IM |
793 | struct task_desc *prev, *next; |
794 | u64 timestamp0; | |
795 | s64 delta; | |
796 | ||
ad236fd2 IM |
797 | if (verbose) |
798 | printf("sched_switch event %p\n", event); | |
799 | ||
fbf94829 IM |
800 | if (cpu >= MAX_CPUS || cpu < 0) |
801 | return; | |
802 | ||
803 | timestamp0 = cpu_last_switched[cpu]; | |
804 | if (timestamp0) | |
805 | delta = timestamp - timestamp0; | |
806 | else | |
807 | delta = 0; | |
808 | ||
809 | if (delta < 0) | |
810 | die("hm, delta: %Ld < 0 ?\n", delta); | |
811 | ||
ad236fd2 IM |
812 | if (verbose) { |
813 | printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n", | |
419ab0d6 FW |
814 | switch_event->prev_comm, switch_event->prev_pid, |
815 | switch_event->next_comm, switch_event->next_pid, | |
ad236fd2 IM |
816 | delta); |
817 | } | |
fbf94829 | 818 | |
419ab0d6 FW |
819 | prev = register_pid(switch_event->prev_pid, switch_event->prev_comm); |
820 | next = register_pid(switch_event->next_pid, switch_event->next_comm); | |
fbf94829 IM |
821 | |
822 | cpu_last_switched[cpu] = timestamp; | |
823 | ||
824 | add_sched_event_run(prev, timestamp, delta); | |
419ab0d6 | 825 | add_sched_event_sleep(prev, timestamp, switch_event->prev_state); |
fbf94829 IM |
826 | } |
827 | ||
fbf94829 | 828 | |
419ab0d6 FW |
829 | static void |
830 | replay_fork_event(struct trace_fork_event *fork_event, | |
831 | struct event *event, | |
832 | int cpu __used, | |
833 | u64 timestamp __used, | |
834 | struct thread *thread __used) | |
835 | { | |
836 | if (verbose) { | |
837 | printf("sched_fork event %p\n", event); | |
838 | printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid); | |
839 | printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid); | |
840 | } | |
841 | register_pid(fork_event->parent_pid, fork_event->parent_comm); | |
842 | register_pid(fork_event->child_pid, fork_event->child_comm); | |
843 | } | |
fbf94829 | 844 | |
419ab0d6 FW |
845 | static struct trace_sched_handler replay_ops = { |
846 | .wakeup_event = replay_wakeup_event, | |
847 | .switch_event = replay_switch_event, | |
848 | .fork_event = replay_fork_event, | |
fbf94829 IM |
849 | }; |
850 | ||
cdce9d73 FW |
851 | #define TASK_STATE_TO_CHAR_STR "RSDTtZX" |
852 | ||
853 | enum thread_state { | |
854 | THREAD_SLEEPING, | |
855 | THREAD_WAKED_UP, | |
856 | THREAD_SCHED_IN, | |
857 | THREAD_IGNORE | |
858 | }; | |
859 | ||
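| /* Latency mode: one thread_latency node per thread, kept in an rbtree keyed by PID; each snapshot progresses SLEEPING -> WAKED_UP -> SCHED_IN (or IGNORE on inconsistent timestamps). */ |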
860 | struct lat_snapshot { | |
861 | struct list_head list; | |
862 | enum thread_state state; | |
863 | u64 wake_up_time; | |
864 | u64 sched_in_time; | |
865 | }; | |
866 | ||
867 | struct thread_latency { | |
868 | struct list_head snapshot_list; | |
869 | struct thread *thread; | |
870 | struct rb_node node; | |
871 | }; | |
872 | ||
873 | static struct rb_root lat_snapshot_root; | |
874 | ||
875 | static struct thread_latency * | |
876 | thread_latency_search(struct rb_root *root, struct thread *thread) | |
877 | { | |
878 | struct rb_node *node = root->rb_node; | |
879 | ||
880 | while (node) { | |
881 | struct thread_latency *lat; | |
882 | ||
883 | lat = container_of(node, struct thread_latency, node); | |
884 | if (thread->pid < lat->thread->pid) | |
885 | node = node->rb_left; | |
886 | else if (thread->pid > lat->thread->pid) | |
887 | node = node->rb_right; | |
888 | else { | |
889 | return lat; | |
890 | } | |
891 | } | |
892 | return NULL; | |
893 | } | |
894 | ||
895 | static void | |
896 | __thread_latency_insert(struct rb_root *root, struct thread_latency *data) | |
897 | { | |
898 | struct rb_node **new = &(root->rb_node), *parent = NULL; | |
899 | ||
900 | while (*new) { | |
901 | struct thread_latency *this; | |
902 | ||
903 | this = container_of(*new, struct thread_latency, node); | |
904 | parent = *new; | |
905 | if (data->thread->pid < this->thread->pid) | |
906 | new = &((*new)->rb_left); | |
907 | else if (data->thread->pid > this->thread->pid) | |
908 | new = &((*new)->rb_right); | |
909 | else | |
910 | die("Double thread insertion\n"); | |
911 | } | |
912 | ||
913 | rb_link_node(&data->node, parent, new); | |
914 | rb_insert_color(&data->node, root); | |
915 | } | |
916 | ||
917 | static void thread_latency_insert(struct thread *thread) | |
918 | { | |
919 | struct thread_latency *lat; | |
920 | lat = calloc(1, sizeof(*lat)); | |
921 | if (!lat) | |
922 | die("No memory"); | |
923 | ||
924 | lat->thread = thread; | |
925 | INIT_LIST_HEAD(&lat->snapshot_list); | |
926 | __thread_latency_insert(&lat_snapshot_root, lat); | |
927 | } | |
928 | ||
929 | static void | |
930 | latency_fork_event(struct trace_fork_event *fork_event __used, | |
931 | struct event *event __used, | |
932 | int cpu __used, | |
933 | u64 timestamp __used, | |
934 | struct thread *thread __used) | |
935 | { | |
936 | /* should insert the newcomer */ | |
937 | } | |
938 | ||
939 | static char sched_out_state(struct trace_switch_event *switch_event) | |
940 | { | |
941 | const char *str = TASK_STATE_TO_CHAR_STR; | |
942 | ||
943 | return str[switch_event->prev_state]; | |
944 | } | |
945 | ||
946 | static void | |
947 | lat_sched_out(struct thread_latency *lat, | |
948 | struct trace_switch_event *switch_event) | |
949 | { | |
950 | struct lat_snapshot *snapshot; | |
951 | ||
952 | if (sched_out_state(switch_event) == 'R') | |
953 | return; | |
954 | ||
955 | snapshot = calloc(1, sizeof(*snapshot)); | |
956 | if (!snapshot) | |
957 | die("No memory"); | |
958 | ||
959 | list_add_tail(&snapshot->list, &lat->snapshot_list); | |
960 | } | |
961 | ||
962 | static void | |
963 | lat_sched_in(struct thread_latency *lat, u64 timestamp) | |
964 | { | |
965 | struct lat_snapshot *snapshot; | |
966 | ||
967 | if (list_empty(&lat->snapshot_list)) | |
968 | return; | |
969 | ||
970 | snapshot = list_entry(lat->snapshot_list.prev, struct lat_snapshot, | |
971 | list); | |
972 | ||
973 | if (snapshot->state != THREAD_WAKED_UP) | |
974 | return; | |
975 | ||
976 | if (timestamp < snapshot->wake_up_time) { | |
977 | snapshot->state = THREAD_IGNORE; | |
978 | return; | |
979 | } | |
980 | ||
981 | snapshot->state = THREAD_SCHED_IN; | |
982 | snapshot->sched_in_time = timestamp; | |
983 | } | |
984 | ||
985 | ||
986 | static void | |
987 | latency_switch_event(struct trace_switch_event *switch_event, | |
988 | struct event *event __used, | |
989 | int cpu __used, | |
990 | u64 timestamp, | |
991 | struct thread *thread __used) | |
992 | { | |
993 | struct thread_latency *out_lat, *in_lat; | |
994 | struct thread *sched_out, *sched_in; | |
995 | ||
996 | sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match); | |
997 | sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match); | |
998 | ||
999 | in_lat = thread_latency_search(&lat_snapshot_root, sched_in); | |
1000 | if (!in_lat) { | |
1001 | thread_latency_insert(sched_in); | |
1002 | in_lat = thread_latency_search(&lat_snapshot_root, sched_in); | |
1003 | if (!in_lat) | |
1004 | die("Internal latency tree error"); | |
1005 | } | |
1006 | ||
1007 | out_lat = thread_latency_search(&lat_snapshot_root, sched_out); | |
1008 | if (!out_lat) { | |
1009 | thread_latency_insert(sched_out); | |
1010 | out_lat = thread_latency_search(&lat_snapshot_root, sched_out); | |
1011 | if (!out_lat) | |
1012 | die("Internal latency tree error"); | |
1013 | } | |
1014 | ||
1015 | lat_sched_in(in_lat, timestamp); | |
1016 | lat_sched_out(out_lat, switch_event); | |
1017 | } | |
1018 | ||
1019 | static void | |
1020 | latency_wakeup_event(struct trace_wakeup_event *wakeup_event, | |
1021 | struct event *event __used, | |
1022 | int cpu __used, | |
1023 | u64 timestamp, | |
1024 | struct thread *thread __used) | |
1025 | { | |
1026 | struct thread_latency *lat; | |
1027 | struct lat_snapshot *snapshot; | |
1028 | struct thread *wakee; | |
1029 | ||
1030 | /* Note for later, it may be interesting to observe the failing cases */ | |
1031 | if (!wakeup_event->success) | |
1032 | return; | |
1033 | ||
1034 | wakee = threads__findnew(wakeup_event->pid, &threads, &last_match); | |
1035 | lat = thread_latency_search(&lat_snapshot_root, wakee); | |
1036 | if (!lat) { | |
1037 | thread_latency_insert(wakee); | |
1038 | return; | |
1039 | } | |
1040 | ||
1041 | if (list_empty(&lat->snapshot_list)) | |
1042 | return; | |
1043 | ||
1044 | snapshot = list_entry(lat->snapshot_list.prev, struct lat_snapshot, | |
1045 | list); | |
1046 | ||
1047 | if (snapshot->state != THREAD_SLEEPING) | |
1048 | return; | |
1049 | ||
1050 | snapshot->state = THREAD_WAKED_UP; | |
1051 | snapshot->wake_up_time = timestamp; | |
1052 | } | |
1053 | ||
1054 | static struct trace_sched_handler lat_ops = { | |
1055 | .wakeup_event = latency_wakeup_event, | |
1056 | .switch_event = latency_switch_event, | |
1057 | .fork_event = latency_fork_event, | |
1058 | }; | |
1059 | ||
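| /* output_lat_thread(): print, for one thread, the number of measured wakeups and the total/average/maximum wakeup-to-sched-in latency. */ |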
1060 | static void output_lat_thread(struct thread_latency *lat) | |
1061 | { | |
1062 | struct lat_snapshot *shot; | |
1063 | int count = 0; | |
1064 | int i; | |
1065 | int ret; | |
1066 | u64 max = 0, avg; | |
1067 | u64 total = 0, delta; | |
1068 | ||
1069 | list_for_each_entry(shot, &lat->snapshot_list, list) { | |
1070 | if (shot->state != THREAD_SCHED_IN) | |
1071 | continue; | |
1072 | ||
1073 | count++; | |
1074 | ||
1075 | delta = shot->sched_in_time - shot->wake_up_time; | |
1076 | if (delta > max) | |
1077 | max = delta; | |
1078 | total += delta; | |
1079 | } | |
1080 | ||
1081 | if (!count) | |
1082 | return; | |
1083 | ||
1084 | ret = printf("%s", lat->thread->comm); | |
1085 | ||
1086 | for (i = 0; i < 25 - ret; i++) | |
1087 | printf(" "); | |
1088 | ||
1089 | avg = total / count; | |
1090 | ||
1091 | printf("%5d %10llu %10llu %10llu\n", count, total, avg, max); | |
1092 | } | |
1093 | ||
1094 | static void output_lat_results(void) | |
1095 | { | |
1096 | struct rb_node *next; | |
1097 | ||
1098 | printf(" Tasks"); | |
1099 | printf(" count"); | |
1100 | printf(" total"); | |
1101 | printf(" avg"); | |
1102 | printf(" max\n\n"); | |
1103 | ||
1104 | next = rb_first(&lat_snapshot_root); | |
1105 | ||
1106 | while (next) { | |
1107 | struct thread_latency *lat; | |
1108 | ||
1109 | lat = rb_entry(next, struct thread_latency, node); | |
1110 | output_lat_thread(lat); | |
1111 | next = rb_next(next); | |
1112 | } | |
1113 | } | |
419ab0d6 FW |
1114 | |
1115 | static struct trace_sched_handler *trace_handler; | |
1116 | ||
fbf94829 | 1117 | static void |
419ab0d6 FW |
1118 | process_sched_wakeup_event(struct raw_event_sample *raw, |
1119 | struct event *event, | |
1120 | int cpu __used, | |
1121 | u64 timestamp __used, | |
1122 | struct thread *thread __used) | |
1123 | { | |
1124 | struct trace_wakeup_event wakeup_event; | |
1125 | ||
1126 | FILL_COMMON_FIELDS(wakeup_event, event, raw->data); | |
1127 | ||
1128 | FILL_ARRAY(wakeup_event, comm, event, raw->data); | |
1129 | FILL_FIELD(wakeup_event, pid, event, raw->data); | |
1130 | FILL_FIELD(wakeup_event, prio, event, raw->data); | |
1131 | FILL_FIELD(wakeup_event, success, event, raw->data); | |
1132 | FILL_FIELD(wakeup_event, cpu, event, raw->data); | |
1133 | ||
1134 | trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread); | |
1135 | } | |
1136 | ||
1137 | static void | |
1138 | process_sched_switch_event(struct raw_event_sample *raw, | |
1139 | struct event *event, | |
1140 | int cpu __used, | |
1141 | u64 timestamp __used, | |
1142 | struct thread *thread __used) | |
1143 | { | |
1144 | struct trace_switch_event switch_event; | |
1145 | ||
1146 | FILL_COMMON_FIELDS(switch_event, event, raw->data); | |
1147 | ||
1148 | FILL_ARRAY(switch_event, prev_comm, event, raw->data); | |
1149 | FILL_FIELD(switch_event, prev_pid, event, raw->data); | |
1150 | FILL_FIELD(switch_event, prev_prio, event, raw->data); | |
1151 | FILL_FIELD(switch_event, prev_state, event, raw->data); | |
1152 | FILL_ARRAY(switch_event, next_comm, event, raw->data); | |
1153 | FILL_FIELD(switch_event, next_pid, event, raw->data); | |
1154 | FILL_FIELD(switch_event, next_prio, event, raw->data); | |
1155 | ||
1156 | trace_handler->switch_event(&switch_event, event, cpu, timestamp, thread); | |
1157 | } | |
1158 | ||
1159 | static void | |
1160 | process_sched_fork_event(struct raw_event_sample *raw, | |
1161 | struct event *event, | |
1162 | int cpu __used, | |
1163 | u64 timestamp __used, | |
1164 | struct thread *thread __used) | |
fbf94829 | 1165 | { |
46538818 FW |
1166 | struct trace_fork_event fork_event; |
1167 | ||
1168 | FILL_COMMON_FIELDS(fork_event, event, raw->data); | |
1169 | ||
1170 | FILL_ARRAY(fork_event, parent_comm, event, raw->data); | |
1171 | FILL_FIELD(fork_event, parent_pid, event, raw->data); | |
1172 | FILL_ARRAY(fork_event, child_comm, event, raw->data); | |
1173 | FILL_FIELD(fork_event, child_pid, event, raw->data); | |
1174 | ||
419ab0d6 | 1175 | trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread); |
fbf94829 IM |
1176 | } |
1177 | ||
419ab0d6 FW |
1178 | static void |
1179 | process_sched_exit_event(struct event *event, | |
1180 | int cpu __used, | |
1181 | u64 timestamp __used, | |
1182 | struct thread *thread __used) | |
fbf94829 | 1183 | { |
ad236fd2 IM |
1184 | if (verbose) |
1185 | printf("sched_exit event %p\n", event); | |
ec156764 IM |
1186 | } |
1187 | ||
1188 | static void | |
ad236fd2 | 1189 | process_raw_event(event_t *raw_event __used, void *more_data, |
ec156764 IM |
1190 | int cpu, u64 timestamp, struct thread *thread) |
1191 | { | |
46538818 | 1192 | struct raw_event_sample *raw = more_data; |
ec156764 IM |
1193 | struct event *event; |
1194 | int type; | |
1195 | ||
1196 | type = trace_parse_common_type(raw->data); | |
1197 | event = trace_find_event(type); | |
1198 | ||
ec156764 | 1199 | if (!strcmp(event->name, "sched_switch")) |
46538818 | 1200 | process_sched_switch_event(raw, event, cpu, timestamp, thread); |
ec156764 | 1201 | if (!strcmp(event->name, "sched_wakeup")) |
46538818 | 1202 | process_sched_wakeup_event(raw, event, cpu, timestamp, thread); |
fbf94829 | 1203 | if (!strcmp(event->name, "sched_wakeup_new")) |
46538818 | 1204 | process_sched_wakeup_event(raw, event, cpu, timestamp, thread); |
fbf94829 | 1205 | if (!strcmp(event->name, "sched_process_fork")) |
46538818 | 1206 | process_sched_fork_event(raw, event, cpu, timestamp, thread); |
fbf94829 IM |
1207 | if (!strcmp(event->name, "sched_process_exit")) |
1208 | process_sched_exit_event(event, cpu, timestamp, thread); | |
ec156764 IM |
1209 | } |
1210 | ||
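| /* process_sample_event(): step through the optional sample fields (TIME, CPU, PERIOD) as indicated by sample_type, then hand PERF_SAMPLE_RAW payloads to process_raw_event(). */ |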
0a02ad93 IM |
1211 | static int |
1212 | process_sample_event(event_t *event, unsigned long offset, unsigned long head) | |
1213 | { | |
1214 | char level; | |
1215 | int show = 0; | |
1216 | struct dso *dso = NULL; | |
1217 | struct thread *thread; | |
1218 | u64 ip = event->ip.ip; | |
1219 | u64 timestamp = -1; | |
1220 | u32 cpu = -1; | |
1221 | u64 period = 1; | |
1222 | void *more_data = event->ip.__more_data; | |
1223 | int cpumode; | |
1224 | ||
1225 | thread = threads__findnew(event->ip.pid, &threads, &last_match); | |
1226 | ||
1227 | if (sample_type & PERF_SAMPLE_TIME) { | |
1228 | timestamp = *(u64 *)more_data; | |
1229 | more_data += sizeof(u64); | |
1230 | } | |
1231 | ||
1232 | if (sample_type & PERF_SAMPLE_CPU) { | |
1233 | cpu = *(u32 *)more_data; | |
1234 | more_data += sizeof(u32); | |
1235 | more_data += sizeof(u32); /* reserved */ | |
1236 | } | |
1237 | ||
1238 | if (sample_type & PERF_SAMPLE_PERIOD) { | |
1239 | period = *(u64 *)more_data; | |
1240 | more_data += sizeof(u64); | |
1241 | } | |
1242 | ||
1243 | dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", | |
1244 | (void *)(offset + head), | |
1245 | (void *)(long)(event->header.size), | |
1246 | event->header.misc, | |
1247 | event->ip.pid, event->ip.tid, | |
1248 | (void *)(long)ip, | |
1249 | (long long)period); | |
1250 | ||
1251 | dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); | |
1252 | ||
1253 | if (thread == NULL) { | |
1254 | eprintf("problem processing %d event, skipping it.\n", | |
1255 | event->header.type); | |
1256 | return -1; | |
1257 | } | |
1258 | ||
1259 | cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK; | |
1260 | ||
1261 | if (cpumode == PERF_EVENT_MISC_KERNEL) { | |
1262 | show = SHOW_KERNEL; | |
1263 | level = 'k'; | |
1264 | ||
1265 | dso = kernel_dso; | |
1266 | ||
1267 | dump_printf(" ...... dso: %s\n", dso->name); | |
1268 | ||
1269 | } else if (cpumode == PERF_EVENT_MISC_USER) { | |
1270 | ||
1271 | show = SHOW_USER; | |
1272 | level = '.'; | |
1273 | ||
1274 | } else { | |
1275 | show = SHOW_HV; | |
1276 | level = 'H'; | |
1277 | ||
1278 | dso = hypervisor_dso; | |
1279 | ||
1280 | dump_printf(" ...... dso: [hypervisor]\n"); | |
1281 | } | |
1282 | ||
ec156764 IM |
1283 | if (sample_type & PERF_SAMPLE_RAW) |
1284 | process_raw_event(event, more_data, cpu, timestamp, thread); | |
0a02ad93 IM |
1285 | |
1286 | return 0; | |
1287 | } | |
1288 | ||
1289 | static int | |
1290 | process_event(event_t *event, unsigned long offset, unsigned long head) | |
1291 | { | |
1292 | trace_event(event); | |
1293 | ||
1294 | switch (event->header.type) { | |
1295 | case PERF_EVENT_MMAP ... PERF_EVENT_LOST: | |
1296 | return 0; | |
1297 | ||
1298 | case PERF_EVENT_COMM: | |
1299 | return process_comm_event(event, offset, head); | |
1300 | ||
1301 | case PERF_EVENT_EXIT ... PERF_EVENT_READ: | |
1302 | return 0; | |
1303 | ||
1304 | case PERF_EVENT_SAMPLE: | |
1305 | return process_sample_event(event, offset, head); | |
1306 | ||
1307 | case PERF_EVENT_MAX: | |
1308 | default: | |
1309 | return -1; | |
1310 | } | |
1311 | ||
1312 | return 0; | |
1313 | } | |
1314 | ||
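| /* __cmd_sched(): mmap perf.data in sliding windows of page_size * mmap_window bytes and feed each record to process_event(), realigning on errors. */ |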
1315 | static int __cmd_sched(void) | |
1316 | { | |
1317 | int ret, rc = EXIT_FAILURE; | |
1318 | unsigned long offset = 0; | |
1319 | unsigned long head = 0; | |
1320 | struct stat perf_stat; | |
1321 | event_t *event; | |
1322 | uint32_t size; | |
1323 | char *buf; | |
1324 | ||
1325 | trace_report(); | |
1326 | register_idle_thread(&threads, &last_match); | |
1327 | ||
1328 | input = open(input_name, O_RDONLY); | |
1329 | if (input < 0) { | |
1330 | perror("failed to open file"); | |
1331 | exit(-1); | |
1332 | } | |
1333 | ||
1334 | ret = fstat(input, &perf_stat); | |
1335 | if (ret < 0) { | |
1336 | perror("failed to stat file"); | |
1337 | exit(-1); | |
1338 | } | |
1339 | ||
1340 | if (!perf_stat.st_size) { | |
1341 | fprintf(stderr, "zero-sized file, nothing to do!\n"); | |
1342 | exit(0); | |
1343 | } | |
1344 | header = perf_header__read(input); | |
1345 | head = header->data_offset; | |
1346 | sample_type = perf_header__sample_type(header); | |
1347 | ||
1348 | if (!(sample_type & PERF_SAMPLE_RAW)) | |
1349 | die("No trace sample to read. Did you call perf record " | |
1350 | "without -R?"); | |
1351 | ||
1352 | if (load_kernel() < 0) { | |
1353 | perror("failed to load kernel symbols"); | |
1354 | return EXIT_FAILURE; | |
1355 | } | |
1356 | ||
1357 | remap: | |
1358 | buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, | |
1359 | MAP_SHARED, input, offset); | |
1360 | if (buf == MAP_FAILED) { | |
1361 | perror("failed to mmap file"); | |
1362 | exit(-1); | |
1363 | } | |
1364 | ||
1365 | more: | |
1366 | event = (event_t *)(buf + head); | |
1367 | ||
1368 | size = event->header.size; | |
1369 | if (!size) | |
1370 | size = 8; | |
1371 | ||
1372 | if (head + event->header.size >= page_size * mmap_window) { | |
1373 | unsigned long shift = page_size * (head / page_size); | |
1374 | int res; | |
1375 | ||
1376 | res = munmap(buf, page_size * mmap_window); | |
1377 | assert(res == 0); | |
1378 | ||
1379 | offset += shift; | |
1380 | head -= shift; | |
1381 | goto remap; | |
1382 | } | |
1383 | ||
1384 | size = event->header.size; | |
1385 | ||
1386 | ||
1387 | if (!size || process_event(event, offset, head) < 0) { | |
1388 | ||
1389 | /* | |
1390 | * assume we lost track of the stream, check alignment, and | |
1391 | * increment a single u64 in the hope to catch on again 'soon'. | |
1392 | */ | |
1393 | ||
1394 | if (unlikely(head & 7)) | |
1395 | head &= ~7ULL; | |
1396 | ||
1397 | size = 8; | |
1398 | } | |
1399 | ||
1400 | head += size; | |
1401 | ||
1402 | if (offset + head < (unsigned long)perf_stat.st_size) | |
1403 | goto more; | |
1404 | ||
1405 | rc = EXIT_SUCCESS; | |
1406 | close(input); | |
1407 | ||
1408 | return rc; | |
1409 | } | |
1410 | ||
1411 | static const char * const annotate_usage[] = { | |
1412 | "perf trace [<options>] <command>", | |
1413 | NULL | |
1414 | }; | |
1415 | ||
1416 | static const struct option options[] = { | |
1417 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, | |
1418 | "dump raw trace in ASCII"), | |
419ab0d6 FW |
1419 | OPT_BOOLEAN('r', "replay", &replay_mode, |
1420 | "replay sched behaviour from traces"), | |
cdce9d73 FW |
1421 | OPT_BOOLEAN('l', "latency", &lat_mode, |
1422 | "measure various latencies"), | |
0a02ad93 IM |
1423 | OPT_BOOLEAN('v', "verbose", &verbose, |
1424 | "be more verbose (show symbol address, etc)"), | |
1425 | OPT_END() | |
1426 | }; | |
1427 | ||
1428 | int cmd_sched(int argc, const char **argv, const char *prefix __used) | |
1429 | { | |
fbf94829 | 1430 | long nr_iterations = 10, i; |
ec156764 | 1431 | |
0a02ad93 IM |
1432 | symbol__init(); |
1433 | page_size = getpagesize(); | |
1434 | ||
1435 | argc = parse_options(argc, argv, options, annotate_usage, 0); | |
1436 | if (argc) { | |
1437 | /* | |
1438 | * Special case: if there's an argument left then assume that | |
1439 | * it's a symbol filter: | |
1440 | */ | |
1441 | if (argc > 1) | |
1442 | usage_with_options(annotate_usage, options); | |
1443 | } | |
1444 | ||
fbf94829 | 1445 | // setup_pager(); |
0a02ad93 | 1446 | |
419ab0d6 FW |
1447 | if (replay_mode) |
1448 | trace_handler = &replay_ops; | |
cdce9d73 FW |
1449 | else if (lat_mode) |
1450 | trace_handler = &lat_ops; | |
1451 | else /* We may need a default subcommand (perf trace?) */ | |
419ab0d6 FW |
1452 | die("Please select a subcommand (-r or -l)\n"); |
1453 | ||
cdce9d73 FW |
1454 | if (replay_mode) { |
1455 | calibrate_run_measurement_overhead(); | |
1456 | calibrate_sleep_measurement_overhead(); | |
ec156764 | 1457 | |
cdce9d73 | 1458 | test_calibrations(); |
ec156764 | 1459 | |
cdce9d73 FW |
1460 | parse_trace(); |
1461 | print_task_traces(); | |
1462 | add_cross_task_wakeups(); | |
ec156764 | 1463 | |
cdce9d73 FW |
1464 | create_tasks(); |
1465 | printf("------------------------------------------------------------\n"); | |
1466 | for (i = 0; i < nr_iterations; i++) | |
1467 | run_one_test(); | |
1468 | } else if (lat_mode) { | |
1469 | setup_pager(); | |
1470 | __cmd_sched(); | |
1471 | output_lat_results(); | |
1472 | } | |
ec156764 IM |
1473 | |
1474 | return 0; | |
0a02ad93 | 1475 | } |