/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *               2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "builtin.h"

#include "perf.h"

#include "util/annotate.h"
#include "util/cache.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/top.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/cpumap.h"
#include "util/xyarray.h"
#include "util/sort.h"

#include "util/debug.h"

#include <assert.h>
#include <fcntl.h>

#include <stdio.h>
#include <termios.h>
#include <unistd.h>
#include <inttypes.h>

#include <errno.h>
#include <time.h>
#include <sched.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/mman.h>

#include <linux/unistd.h>
#include <linux/types.h>

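/*
 * Figure out the terminal size: honour the LINES/COLUMNS environment
 * variables first, then fall back to the TIOCGWINSZ ioctl, and finally
 * to a default 80x25 window.
 */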
void get_term_dimensions(struct winsize *ws)
{
	char *s = getenv("LINES");

	if (s != NULL) {
		ws->ws_row = atoi(s);
		s = getenv("COLUMNS");
		if (s != NULL) {
			ws->ws_col = atoi(s);
			if (ws->ws_row && ws->ws_col)
				return;
		}
	}
#ifdef TIOCGWINSZ
	if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
	    ws->ws_row && ws->ws_col)
		return;
#endif
	ws->ws_row = 25;
	ws->ws_col = 80;
}

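/* Keep a few rows free for the header lines printed above the entries. */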
static void perf_top__update_print_entries(struct perf_top *top)
{
	if (top->print_entries > 9)
		top->print_entries -= 9;
}

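/*
 * SIGWINCH handler: re-read the terminal size and grow or clamp the
 * number of displayed entries accordingly.
 */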
static void perf_top__sig_winch(int sig __used, siginfo_t *info __used, void *arg)
{
	struct perf_top *top = arg;

	get_term_dimensions(&top->winsize);
	if (!top->print_entries
	    || (top->print_entries+4) > top->winsize.ws_row) {
		top->print_entries = top->winsize.ws_row;
	} else {
		top->print_entries += 4;
		top->winsize.ws_row = top->print_entries;
	}
	perf_top__update_print_entries(top);
}

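/*
 * Set up annotation for the selected histogram entry: allocate the
 * per-symbol histograms and disassemble the symbol so the details view
 * can show per-instruction sample counts.  Fails when only
 * /proc/kallsyms is available.
 */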
static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
	struct symbol *sym;
	struct annotation *notes;
	struct map *map;
	int err = -1;

	if (!he || !he->ms.sym)
		return -1;

	sym = he->ms.sym;
	map = he->ms.map;

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
	if (map->dso->symtab_type == SYMTAB__KALLSYMS) {
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path\n", sym->name);
		sleep(1);
		return -1;
	}

	notes = symbol__annotation(sym);
	if (notes->src != NULL) {
		pthread_mutex_lock(&notes->lock);
		goto out_assign;
	}

	pthread_mutex_lock(&notes->lock);

	if (symbol__alloc_hist(sym) < 0) {
		pthread_mutex_unlock(&notes->lock);
		pr_err("Not enough memory for annotating '%s' symbol!\n",
		       sym->name);
		sleep(1);
		return err;
	}

	err = symbol__annotate(sym, map, 0);
	if (err == 0) {
out_assign:
		top->sym_filter_entry = he;
	}

	pthread_mutex_unlock(&notes->lock);
	return err;
}

static void __zero_source_counters(struct hist_entry *he)
{
	struct symbol *sym = he->ms.sym;
	symbol__annotate_zero_histograms(sym);
}

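/*
 * Account one sample at address 'ip' in the annotation histogram of the
 * hist entry's symbol.  Only done when the symbol is being annotated
 * (details view, or always in the TUI), and skipped if the annotation
 * lock is contended.
 */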
static void perf_top__record_precise_ip(struct perf_top *top,
					struct hist_entry *he,
					int counter, u64 ip)
{
	struct annotation *notes;
	struct symbol *sym;

	if (he == NULL || he->ms.sym == NULL ||
	    ((top->sym_filter_entry == NULL ||
	      top->sym_filter_entry->ms.sym != he->ms.sym) && use_browser != 1))
		return;

	sym = he->ms.sym;
	notes = symbol__annotation(sym);

	if (pthread_mutex_trylock(&notes->lock))
		return;

	if (notes->src == NULL && symbol__alloc_hist(sym) < 0) {
		pthread_mutex_unlock(&notes->lock);
		pr_err("Not enough memory for annotating '%s' symbol!\n",
		       sym->name);
		sleep(1);
		return;
	}

	ip = he->ms.map->map_ip(he->ms.map, ip);
	symbol__inc_addr_samples(sym, he->ms.map, counter, ip);

	pthread_mutex_unlock(&notes->lock);
}

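/*
 * Print the annotated lines of the symbol selected with 's', filtered by
 * the annotation percent filter, then zero or decay its histogram
 * depending on the 'z' toggle.
 */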
static void perf_top__show_details(struct perf_top *top)
{
	struct hist_entry *he = top->sym_filter_entry;
	struct annotation *notes;
	struct symbol *symbol;
	int more;

	if (!he)
		return;

	symbol = he->ms.sym;
	notes = symbol__annotation(symbol);

	pthread_mutex_lock(&notes->lock);

	if (notes->src == NULL)
		goto out_unlock;

	printf("Showing %s for %s\n", event_name(top->sym_evsel), symbol->name);
	printf(" Events Pcnt (>=%d%%)\n", top->sym_pcnt_filter);

	more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel->idx,
				       0, top->sym_pcnt_filter, top->print_entries, 4);
	if (top->zero)
		symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
	else
		symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
	if (more != 0)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
	pthread_mutex_unlock(&notes->lock);
}

static const char CONSOLE_CLEAR[] = "\e[H\e[2J";

static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel,
						     struct addr_location *al,
						     struct perf_sample *sample)
{
	struct hist_entry *he;

	he = __hists__add_entry(&evsel->hists, al, NULL, sample->period);
	if (he == NULL)
		return NULL;

	hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
	return he;
}

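/*
 * Redraw the stdio screen: clear it, print the header and the lost-events
 * warning if needed, then either the annotation details or the resorted
 * and decayed histogram.
 */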
static void perf_top__print_sym_table(struct perf_top *top)
{
	char bf[160];
	int printed = 0;
	const int win_width = top->winsize.ws_col - 1;

	puts(CONSOLE_CLEAR);

	perf_top__header_snprintf(top, bf, sizeof(bf));
	printf("%s\n", bf);

	perf_top__reset_sample_counters(top);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	if (top->sym_evsel->hists.stats.nr_lost_warned !=
	    top->sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]) {
		top->sym_evsel->hists.stats.nr_lost_warned =
			top->sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST];
		color_fprintf(stdout, PERF_COLOR_RED,
			      "WARNING: LOST %d chunks, Check IO/CPU overload",
			      top->sym_evsel->hists.stats.nr_lost_warned);
		++printed;
	}

	if (top->sym_filter_entry) {
		perf_top__show_details(top);
		return;
	}

	hists__collapse_resort_threaded(&top->sym_evsel->hists);
	hists__output_resort_threaded(&top->sym_evsel->hists);
	hists__decay_entries_threaded(&top->sym_evsel->hists,
				      top->hide_user_symbols,
				      top->hide_kernel_symbols);
	hists__output_recalc_col_len(&top->sym_evsel->hists,
				     top->winsize.ws_row - 3);
	putchar('\n');
	hists__fprintf(&top->sym_evsel->hists, NULL, false, false,
		       top->winsize.ws_row - 4 - printed, win_width, stdout);
}

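/*
 * Read a line from stdin and, if it is all digits, store the parsed
 * value in *target; non-numeric input leaves *target untouched.
 */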
static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		return;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	p = buf;
	while (*p) {
		if (!isdigit(*p))
			goto out_free;
		p++;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}

static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
{
	char *buf = malloc(0), *p;
	struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
	struct rb_node *next;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		__zero_source_counters(syme);
		top->sym_filter_entry = NULL;
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	next = rb_first(&top->sym_evsel->hists.entries);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
			found = n;
			break;
		}
		next = rb_next(&n->rb_node);
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
	} else
		perf_top__parse_source(top, found);

out_free:
	free(buf);
}

static void perf_top__print_mapped_keys(struct perf_top *top)
{
	char *name = NULL;

	if (top->sym_filter_entry) {
		struct symbol *sym = top->sym_filter_entry->ms.sym;
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs);
	fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);

	if (top->evlist->nr_entries > 1)
		fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(top->sym_evsel));

	fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);

	fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->sym_pcnt_filter);
	fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
	fprintf(stdout, "\t[S] stop annotation.\n");

	fprintf(stdout,
		"\t[K] hide kernel symbols. \t(%s)\n",
		top->hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U] hide user symbols. \t(%s)\n",
		top->hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top->zero ? 1 : 0);
	fprintf(stdout, "\t[qQ] quit.\n");
}

static int perf_top__key_mapped(struct perf_top *top, int c)
{
	switch (c) {
	case 'd':
	case 'e':
	case 'f':
	case 'z':
	case 'q':
	case 'Q':
	case 'K':
	case 'U':
	case 'F':
	case 's':
	case 'S':
		return 1;
	case 'E':
		return top->evlist->nr_entries > 1 ? 1 : 0;
	default:
		break;
	}

	return 0;
}

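/*
 * React to an interactive key: if the key is not one of the mapped ones,
 * show the help menu and read another key in raw mode, then dispatch to
 * the matching action (delay, entries, event, filters, annotation, quit).
 */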
static void perf_top__handle_keypress(struct perf_top *top, int c)
{
	if (!perf_top__key_mapped(top, c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios tc, save;

		perf_top__print_mapped_keys(top);
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		tcgetattr(0, &save);
		tc = save;
		tc.c_lflag &= ~(ICANON | ECHO);
		tc.c_cc[VMIN] = 0;
		tc.c_cc[VTIME] = 0;
		tcsetattr(0, TCSANOW, &tc);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!perf_top__key_mapped(top, c))
			return;
	}

	switch (c) {
	case 'd':
		prompt_integer(&top->delay_secs, "Enter display delay");
		if (top->delay_secs < 1)
			top->delay_secs = 1;
		break;
	case 'e':
		prompt_integer(&top->print_entries, "Enter display entries (lines)");
		if (top->print_entries == 0) {
			struct sigaction act = {
				.sa_sigaction = perf_top__sig_winch,
				.sa_flags = SA_SIGINFO,
			};
			perf_top__sig_winch(SIGWINCH, NULL, top);
			sigaction(SIGWINCH, &act, NULL);
		} else {
			perf_top__sig_winch(SIGWINCH, NULL, top);
			signal(SIGWINCH, SIG_DFL);
		}
		break;
	case 'E':
		if (top->evlist->nr_entries > 1) {
			/* Select 0 as the default event: */
			int counter = 0;

			fprintf(stderr, "\nAvailable events:");

			list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
				fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, event_name(top->sym_evsel));

			prompt_integer(&counter, "Enter details event counter");

			if (counter >= top->evlist->nr_entries) {
				top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
				fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top->sym_evsel));
				sleep(1);
				break;
			}
			list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
				if (top->sym_evsel->idx == counter)
					break;
		} else
			top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
		break;
	case 'f':
		prompt_integer(&top->count_filter, "Enter display event count filter");
		break;
	case 'F':
		prompt_percent(&top->sym_pcnt_filter,
			       "Enter details display event filter (percent)");
		break;
	case 'K':
		top->hide_kernel_symbols = !top->hide_kernel_symbols;
		break;
	case 'q':
	case 'Q':
		printf("exiting.\n");
		if (top->dump_symtab)
			perf_session__fprintf_dsos(top->session, stderr);
		exit(0);
	case 's':
		perf_top__prompt_symbol(top, "Enter details symbol");
		break;
	case 'S':
		if (!top->sym_filter_entry)
			break;
		else {
			struct hist_entry *syme = top->sym_filter_entry;

			top->sym_filter_entry = NULL;
			__zero_source_counters(syme);
		}
		break;
	case 'U':
		top->hide_user_symbols = !top->hide_user_symbols;
		break;
	case 'z':
		top->zero = !top->zero;
		break;
	default:
		break;
	}
}

static void perf_top__sort_new_samples(void *arg)
{
	struct perf_top *t = arg;
	perf_top__reset_sample_counters(t);

	if (t->evlist->selected != NULL)
		t->sym_evsel = t->evlist->selected;

	hists__collapse_resort_threaded(&t->sym_evsel->hists);
	hists__output_resort_threaded(&t->sym_evsel->hists);
	hists__decay_entries_threaded(&t->sym_evsel->hists,
				      t->hide_user_symbols,
				      t->hide_kernel_symbols);
}

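/* Display thread for the TUI: hand control over to the hists browser. */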
static void *display_thread_tui(void *arg)
{
	struct perf_top *top = arg;
	const char *help = "For a higher level overview, try: perf top --sort comm,dso";

	perf_top__sort_new_samples(top);
	perf_evlist__tui_browse_hists(top->evlist, help,
				      perf_top__sort_new_samples,
				      top, top->delay_secs);

	exit_browser(0);
	exit(0);
	return NULL;
}

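/*
 * Display thread for --stdio: redraw the symbol table every delay_secs
 * and drop into the keypress handler whenever something arrives on stdin.
 */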
static void *display_thread(void *arg)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios tc, save;
	struct perf_top *top = arg;
	int delay_msecs, c;

	tcgetattr(0, &save);
	tc = save;
	tc.c_lflag &= ~(ICANON | ECHO);
	tc.c_cc[VMIN] = 0;
	tc.c_cc[VTIME] = 0;

	pthread__unblock_sigwinch();
repeat:
	delay_msecs = top->delay_secs * 1000;
	tcsetattr(0, TCSANOW, &tc);
	/* trash return */
	getc(stdin);

	while (1) {
		perf_top__print_sym_table(top);
		/*
		 * Either timeout expired or we got an EINTR due to SIGWINCH,
		 * refresh screen in both cases.
		 */
		switch (poll(&stdin_poll, 1, delay_msecs)) {
		case 0:
			continue;
		case -1:
			if (errno == EINTR)
				continue;
			/* Fall thru */
		default:
			goto process_hotkey;
		}
	}
process_hotkey:
	c = getc(stdin);
	tcsetattr(0, TCSAFLUSH, &save);

	perf_top__handle_keypress(top, c);
	goto repeat;

	return NULL;
}

/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
	"default_idle",
	"native_safe_halt",
	"cpu_idle",
	"enter_idle",
	"exit_idle",
	"mwait_idle",
	"mwait_idle_with_hints",
	"poll_idle",
	"ppc64_runlatch_off",
	"pseries_dedicated_idle_sleep",
	NULL
};

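/*
 * Per-symbol filter used while loading symbol tables: returning 1 drops
 * linker/section markers from the table, while idle routines from the
 * list above are kept but tagged as 'ignore' so their samples are not
 * accounted.
 */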
static int symbol_filter(struct map *map __used, struct symbol *sym)
{
	const char *name = sym->name;
	int i;

	/*
	 * ppc64 uses function descriptors and prepends a '.' to the
	 * name of every text symbol. Remove it.
	 */
	if (name[0] == '.')
		name++;

	if (!strcmp(name, "_text") ||
	    !strcmp(name, "_etext") ||
	    !strcmp(name, "_sinittext") ||
	    !strncmp("init_module", name, 11) ||
	    !strncmp("cleanup_module", name, 14) ||
	    strstr(name, "_text_start") ||
	    strstr(name, "_text_end"))
		return 1;

	for (i = 0; skip_symbols[i]; i++) {
		if (!strcmp(skip_symbols[i], name)) {
			sym->ignore = true;
			break;
		}
	}

	return 0;
}

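/*
 * Resolve a PERF_RECORD_SAMPLE to a map/symbol, warn once about
 * kptr_restrict or missing vmlinux symbols, and add (or update) the
 * corresponding histogram entry, callchain and annotation counts.
 */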
static void perf_event__process_sample(struct perf_tool *tool,
				       const union perf_event *event,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_top *top = container_of(tool, struct perf_top, tool);
	struct symbol *parent = NULL;
	u64 ip = event->ip.ip;
	struct addr_location al;
	int err;

	if (!machine && perf_guest) {
		pr_err("Can't find guest [%d]'s kernel information\n",
		       event->ip.pid);
		return;
	}

	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
		top->exact_samples++;

	if (perf_event__preprocess_sample(event, machine, &al, sample,
					  symbol_filter) < 0 ||
	    al.filtered)
		return;

	if (!top->kptr_restrict_warned &&
	    symbol_conf.kptr_restrict &&
	    al.cpumode == PERF_RECORD_MISC_KERNEL) {
		ui__warning(
			"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
			"Check /proc/sys/kernel/kptr_restrict.\n\n"
			"Kernel%s samples will not be resolved.\n",
			!RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
			" modules" : "");
		if (use_browser <= 0)
			sleep(5);
		top->kptr_restrict_warned = true;
	}

	if (al.sym == NULL) {
		const char *msg = "Kernel samples will not be resolved.\n";
		/*
		 * As we do lazy loading of symtabs we will only know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (!top->kptr_restrict_warned && !top->vmlinux_warned &&
		    al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
			if (symbol_conf.vmlinux_name) {
				ui__warning("The %s file can't be used.\n%s",
					    symbol_conf.vmlinux_name, msg);
			} else {
				ui__warning("A vmlinux file was not found.\n%s",
					    msg);
			}

			if (use_browser <= 0)
				sleep(5);
			top->vmlinux_warned = true;
		}
	}

	if (al.sym == NULL || !al.sym->ignore) {
		struct hist_entry *he;

		if ((sort__has_parent || symbol_conf.use_callchain) &&
		    sample->callchain) {
			err = machine__resolve_callchain(machine, evsel, al.thread,
							 sample->callchain, &parent);
			if (err)
				return;
		}

		he = perf_evsel__add_hist_entry(evsel, &al, sample);
		if (he == NULL) {
			pr_err("Problem incrementing symbol period, skipping event\n");
			return;
		}

		if (symbol_conf.use_callchain) {
			err = callchain_append(he->callchain, &evsel->hists.callchain_cursor,
					       sample->period);
			if (err)
				return;
		}

		if (top->sort_has_symbols)
			perf_top__record_precise_ip(top, he, evsel->idx, ip);
	}

	return;
}

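/*
 * Drain one mmap ring buffer: parse each event, bump the per-origin
 * sample counters and feed samples/side-band events to the right machine.
 */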
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
	struct perf_sample sample;
	struct perf_evsel *evsel;
	struct perf_session *session = top->session;
	union perf_event *event;
	struct machine *machine;
	u8 origin;
	int ret;

	while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
		ret = perf_session__parse_sample(session, event, &sample);
		if (ret) {
			pr_err("Can't parse sample, err = %d\n", ret);
			continue;
		}

		evsel = perf_evlist__id2evsel(session->evlist, sample.id);
		assert(evsel != NULL);

		origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

		if (event->header.type == PERF_RECORD_SAMPLE)
			++top->samples;

		switch (origin) {
		case PERF_RECORD_MISC_USER:
			++top->us_samples;
			if (top->hide_user_symbols)
				continue;
			machine = perf_session__find_host_machine(session);
			break;
		case PERF_RECORD_MISC_KERNEL:
			++top->kernel_samples;
			if (top->hide_kernel_symbols)
				continue;
			machine = perf_session__find_host_machine(session);
			break;
		case PERF_RECORD_MISC_GUEST_KERNEL:
			++top->guest_kernel_samples;
			machine = perf_session__find_machine(session, event->ip.pid);
			break;
		case PERF_RECORD_MISC_GUEST_USER:
			++top->guest_us_samples;
			/*
			 * TODO: we don't process guest user samples on the
			 * host side, except for simple counting.
			 */
			/* Fall thru */
		default:
			continue;
		}


		if (event->header.type == PERF_RECORD_SAMPLE) {
			perf_event__process_sample(&top->tool, event, evsel,
						   &sample, machine);
		} else if (event->header.type < PERF_RECORD_MAX) {
			hists__inc_nr_events(&evsel->hists, event->header.type);
			perf_event__process(&top->tool, event, &sample, machine);
		} else
			++session->hists.stats.nr_unknown_events;
	}
}

static void perf_top__mmap_read(struct perf_top *top)
{
	int i;

	for (i = 0; i < top->evlist->nr_mmaps; i++)
		perf_top__mmap_read_idx(top, i);
}

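/*
 * Open a counter per event in the evlist, falling back for older kernels
 * (no exclude_guest/exclude_host, no sample_id_all, cycles -> cpu-clock),
 * then mmap the resulting file descriptors for reading.
 */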
static void perf_top__start_counters(struct perf_top *top)
{
	struct perf_evsel *counter, *first;
	struct perf_evlist *evlist = top->evlist;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(counter, &evlist->entries, node) {
		struct perf_event_attr *attr = &counter->attr;
		struct xyarray *group_fd = NULL;

		if (top->group && counter != first)
			group_fd = first->fd;

		attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;

		if (top->freq) {
			attr->sample_type |= PERF_SAMPLE_PERIOD;
			attr->freq = 1;
			attr->sample_freq = top->freq;
		}

		if (evlist->nr_entries > 1) {
			attr->sample_type |= PERF_SAMPLE_ID;
			attr->read_format |= PERF_FORMAT_ID;
		}

		if (symbol_conf.use_callchain)
			attr->sample_type |= PERF_SAMPLE_CALLCHAIN;

		attr->mmap = 1;
		attr->comm = 1;
		attr->inherit = top->inherit;
fallback_missing_features:
		if (top->exclude_guest_missing)
			attr->exclude_guest = attr->exclude_host = 0;
retry_sample_id:
		attr->sample_id_all = top->sample_id_all_avail ? 1 : 0;
try_again:
		if (perf_evsel__open(counter, top->evlist->cpus,
				     top->evlist->threads, top->group,
				     group_fd) < 0) {
			int err = errno;

			if (err == EPERM || err == EACCES) {
				ui__error_paranoid();
				goto out_err;
			} else if (err == EINVAL) {
				if (!top->exclude_guest_missing &&
				    (attr->exclude_guest || attr->exclude_host)) {
					pr_debug("Old kernel, cannot exclude "
						 "guest or host samples.\n");
					top->exclude_guest_missing = true;
					goto fallback_missing_features;
				} else if (top->sample_id_all_avail) {
					/*
					 * Old kernel, no attr->sample_id_all field
					 */
					top->sample_id_all_avail = false;
					goto retry_sample_id;
				}
			}
			/*
			 * If it's cycles then fall back to hrtimer
			 * based cpu-clock-tick sw counter, which
			 * is always available even if no PMU support:
			 */
			if (attr->type == PERF_TYPE_HARDWARE &&
			    attr->config == PERF_COUNT_HW_CPU_CYCLES) {
				if (verbose)
					ui__warning("Cycles event not supported,\n"
						    "trying to fall back to cpu-clock-ticks\n");

				attr->type = PERF_TYPE_SOFTWARE;
				attr->config = PERF_COUNT_SW_CPU_CLOCK;
				goto try_again;
			}

			if (err == ENOENT) {
				ui__warning("The %s event is not supported.\n",
					    event_name(counter));
				goto out_err;
			} else if (err == EMFILE) {
				ui__warning("Too many events are opened.\n"
					    "Try again after reducing the number of events\n");
				goto out_err;
			}

			ui__warning("The sys_perf_event_open() syscall "
				    "returned with %d (%s). /bin/dmesg "
				    "may provide additional information.\n"
				    "No CONFIG_PERF_EVENTS=y kernel support "
				    "configured?\n", err, strerror(err));
			goto out_err;
		}
	}

	if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) {
		ui__warning("Failed to mmap with %d (%s)\n",
			    errno, strerror(errno));
		goto out_err;
	}

	return;

out_err:
	exit_browser(0);
	exit(0);
}

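/*
 * Reject -g when "sym" is not among the sort keys, otherwise register
 * the callchain parameters.
 */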
static int perf_top__setup_sample_type(struct perf_top *top)
{
	if (!top->sort_has_symbols) {
		if (symbol_conf.use_callchain) {
			ui__warning("Selected -g but \"sym\" not present in --sort/-s.");
			return -EINVAL;
		}
	} else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) {
		if (callchain_register_param(&callchain_param) < 0) {
			ui__warning("Can't register callchain params.\n");
			return -EINVAL;
		}
	}

	return 0;
}

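/*
 * Main monitoring loop: create the session, synthesize the existing
 * threads, start the counters, spawn the display thread and then keep
 * draining the mmap buffers until the user quits.
 */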
static int __cmd_top(struct perf_top *top)
{
	pthread_t thread;
	int ret;
	/*
	 * FIXME: perf_session__new should allow passing an O_MMAP, so that all this
	 * mmap reading, etc. is encapsulated in it. Use O_WRONLY for now.
	 */
	top->session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
	if (top->session == NULL)
		return -ENOMEM;

	ret = perf_top__setup_sample_type(top);
	if (ret)
		goto out_delete;

	if (top->target_tid != -1)
		perf_event__synthesize_thread_map(&top->tool, top->evlist->threads,
						  perf_event__process,
						  &top->session->host_machine);
	else
		perf_event__synthesize_threads(&top->tool, perf_event__process,
					       &top->session->host_machine);
	perf_top__start_counters(top);
	top->session->evlist = top->evlist;
	perf_session__update_sample_type(top->session);

	/* Wait for a minimal set of events before starting the snapshot */
	poll(top->evlist->pollfd, top->evlist->nr_fds, 100);

	perf_top__mmap_read(top);

	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
							     display_thread), top)) {
		printf("Could not create display thread.\n");
		exit(-1);
	}

	if (top->realtime_prio) {
		struct sched_param param;

		param.sched_priority = top->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			printf("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	while (1) {
		u64 hits = top->samples;

		perf_top__mmap_read(top);

		if (hits == top->samples)
			ret = poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
	}

out_delete:
	perf_session__delete(top->session);
	top->session = NULL;

	return 0;
}

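/*
 * Parse the -G/--call-graph argument:
 * "output_type[,min_percent[,print_limit][,order]]", e.g. "fractal,0.5,callee".
 */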
static int
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
	struct perf_top *top = (struct perf_top *)opt->value;
	char *tok, *tok2;
	char *endptr;

	/*
	 * --no-call-graph
	 */
	if (unset) {
		top->dont_use_callchains = true;
		return 0;
	}

	symbol_conf.use_callchain = true;

	if (!arg)
		return 0;

	tok = strtok((char *)arg, ",");
	if (!tok)
		return -1;

	/* get the output mode */
	if (!strncmp(tok, "graph", strlen(arg)))
		callchain_param.mode = CHAIN_GRAPH_ABS;

	else if (!strncmp(tok, "flat", strlen(arg)))
		callchain_param.mode = CHAIN_FLAT;

	else if (!strncmp(tok, "fractal", strlen(arg)))
		callchain_param.mode = CHAIN_GRAPH_REL;

	else if (!strncmp(tok, "none", strlen(arg))) {
		callchain_param.mode = CHAIN_NONE;
		symbol_conf.use_callchain = false;

		return 0;
	} else
		return -1;

	/* get the min percentage */
	tok = strtok(NULL, ",");
	if (!tok)
		goto setup;

	callchain_param.min_percent = strtod(tok, &endptr);
	if (tok == endptr)
		return -1;

	/* get the print limit */
	tok2 = strtok(NULL, ",");
	if (!tok2)
		goto setup;

	if (tok2[0] != 'c') {
		callchain_param.print_limit = strtod(tok2, &endptr);
		tok2 = strtok(NULL, ",");
		if (!tok2)
			goto setup;
	}

	/* get the call chain order */
	if (!strcmp(tok2, "caller"))
		callchain_param.order = ORDER_CALLER;
	else if (!strcmp(tok2, "callee"))
		callchain_param.order = ORDER_CALLEE;
	else
		return -1;
setup:
	if (callchain_register_param(&callchain_param) < 0) {
		fprintf(stderr, "Can't register callchain params\n");
		return -1;
	}
	return 0;
}

static const char * const top_usage[] = {
	"perf top [<options>]",
	NULL
};

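/*
 * The 'perf top' entry point: parse options, create the evlist and
 * thread/cpu maps, set up sorting and symbol handling, then run __cmd_top().
 */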
int cmd_top(int argc, const char **argv, const char *prefix __used)
{
	struct perf_evsel *pos;
	int status = -ENOMEM;
	struct perf_top top = {
		.count_filter		= 5,
		.delay_secs		= 2,
		.target_pid		= -1,
		.target_tid		= -1,
		.freq			= 1000, /* 1 KHz */
		.sample_id_all_avail	= true,
		.mmap_pages		= 128,
		.sym_pcnt_filter	= 5,
	};
	char callchain_default_opt[] = "fractal,0.5,callee";
	const struct option options[] = {
	OPT_CALLBACK('e', "event", &top.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_INTEGER('c', "count", &top.default_interval,
		    "event period to sample"),
	OPT_INTEGER('p', "pid", &top.target_pid,
		    "profile events on existing process id"),
	OPT_INTEGER('t', "tid", &top.target_tid,
		    "profile events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &top.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &top.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
		    "hide kernel symbols"),
	OPT_UINTEGER('m', "mmap-pages", &top.mmap_pages, "number of mmap data pages"),
	OPT_INTEGER('r', "realtime", &top.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &top.delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
		    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &top.count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN('g', "group", &top.group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "inherit", &top.inherit,
		    "child tasks inherit counters"),
	OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
		   "symbol to annotate"),
	OPT_BOOLEAN('z', "zero", &top.zero,
		    "zero history across updates"),
	OPT_INTEGER('F', "freq", &top.freq,
		    "profile at this frequency"),
	OPT_INTEGER('E', "entries", &top.print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
		    "hide user symbols"),
	OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
	OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): pid, comm, dso, symbol, parent"),
	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
		    "Show a column with the number of samples"),
	OPT_CALLBACK_DEFAULT('G', "call-graph", &top, "output_type,min_percent, call_order",
		     "Display callchains using output_type (graph, flat, fractal, or none), min percent threshold and callchain order. "
		     "Default: fractal,0.5,callee", &parse_callchain_opt,
		     callchain_default_opt),
	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
		    "Show a column with the sum of periods"),
	OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
	OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
		   "only consider symbols in these comms"),
	OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
		   "only consider these symbols"),
	OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
		    "Interleave source code with assembly code (default)"),
	OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
		    "Display raw encoding of assembly instructions (default)"),
	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
	OPT_END()
	};

	top.evlist = perf_evlist__new(NULL, NULL);
	if (top.evlist == NULL)
		return -ENOMEM;

	symbol_conf.exclude_other = false;

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	if (sort_order == default_sort_order)
		sort_order = "dso,symbol";

	setup_sorting(top_usage, options);

	if (top.use_stdio)
		use_browser = 0;
	else if (top.use_tui)
		use_browser = 1;

	setup_browser(false);

	/* CPU and PID are mutually exclusive */
	if (top.target_tid > 0 && top.cpu_list) {
		printf("WARNING: PID switch overriding CPU\n");
		sleep(1);
		top.cpu_list = NULL;
	}

	if (top.target_pid != -1)
		top.target_tid = top.target_pid;

	if (perf_evlist__create_maps(top.evlist, top.target_pid,
				     top.target_tid, top.cpu_list) < 0)
		usage_with_options(top_usage, options);

	if (!top.evlist->nr_entries &&
	    perf_evlist__add_default(top.evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		return -ENOMEM;
	}

	symbol_conf.nr_events = top.evlist->nr_entries;

	if (top.delay_secs < 1)
		top.delay_secs = 1;

	/*
	 * User specified count overrides default frequency.
	 */
	if (top.default_interval)
		top.freq = 0;
	else if (top.freq) {
		top.default_interval = top.freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		exit(EXIT_FAILURE);
	}

	list_for_each_entry(pos, &top.evlist->entries, node) {
		/*
		 * Fill in the ones not specifically initialized via -c:
		 */
		if (!pos->attr.sample_period)
			pos->attr.sample_period = top.default_interval;
	}

	top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);

	symbol_conf.priv_size = sizeof(struct annotation);

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	if (symbol__init() < 0)
		return -1;

	sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", stdout);
	sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout);
	sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout);

	/*
	 * Avoid annotation data structures overhead when symbols aren't on the
	 * sort list.
	 */
	top.sort_has_symbols = sort_sym.list.next != NULL;

	get_term_dimensions(&top.winsize);
	if (top.print_entries == 0) {
		struct sigaction act = {
			.sa_sigaction = perf_top__sig_winch,
			.sa_flags = SA_SIGINFO,
		};
		perf_top__update_print_entries(&top);
		sigaction(SIGWINCH, &act, NULL);
	}

	status = __cmd_top(&top);

	perf_evlist__delete(top.evlist);

	return status;
}