]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * builtin-top.c | |
3 | * | |
4 | * Builtin top command: Display a continuously updated profile of | |
5 | * any workload, CPU or specific PID. | |
6 | * | |
7 | * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com> | |
8 | * | |
9 | * Improvements and fixes by: | |
10 | * | |
11 | * Arjan van de Ven <arjan@linux.intel.com> | |
12 | * Yanmin Zhang <yanmin.zhang@intel.com> | |
13 | * Wu Fengguang <fengguang.wu@intel.com> | |
14 | * Mike Galbraith <efault@gmx.de> | |
15 | * Paul Mackerras <paulus@samba.org> | |
16 | * | |
17 | * Released under the GPL v2. (and only v2, not any later version) | |
18 | */ | |
19 | #include "builtin.h" | |
20 | ||
21 | #include "perf.h" | |
22 | ||
23 | #include "util/symbol.h" | |
24 | #include "util/color.h" | |
25 | #include "util/util.h" | |
26 | #include "util/rbtree.h" | |
27 | #include "util/parse-options.h" | |
28 | #include "util/parse-events.h" | |
29 | ||
30 | #include <assert.h> | |
31 | #include <fcntl.h> | |
32 | ||
33 | #include <stdio.h> | |
34 | ||
35 | #include <errno.h> | |
36 | #include <time.h> | |
37 | #include <sched.h> | |
38 | #include <pthread.h> | |
39 | ||
40 | #include <sys/syscall.h> | |
41 | #include <sys/ioctl.h> | |
42 | #include <sys/poll.h> | |
43 | #include <sys/prctl.h> | |
44 | #include <sys/wait.h> | |
45 | #include <sys/uio.h> | |
46 | #include <sys/mman.h> | |
47 | ||
48 | #include <linux/unistd.h> | |
49 | #include <linux/types.h> | |
50 | ||
/* One perf fd per (CPU slot, counter) pair; filled in by start_counter(). */
static int fd[MAX_NR_CPUS][MAX_COUNTERS];

static int system_wide = 0;

/* Default sample period; overridden by -c, reused as the frequency by -F. */
static int default_interval = 100000;

static u64 count_filter = 5;		/* -f: minimum samples to display */
static int print_entries = 15;		/* -E: max functions per refresh */

static int target_pid = -1;		/* -p: profile this pid only */
static int profile_cpu = -1;		/* -C: profile this CPU only */
static int nr_cpus = 0;
static unsigned int realtime_prio = 0;	/* -r: SCHED_FIFO priority */
static int group = 0;			/* -g: open counters as one group */
static unsigned int page_size;
static unsigned int mmap_pages = 16;	/* -m: ring-buffer size in pages */
static int freq = 0;			/* -F: sample by frequency, not period */
static int verbose = 0;

static char *sym_filter;		/* -s: symbol name to annotate */
static unsigned long filter_start;	/* address range of sym_filter, */
static unsigned long filter_end;	/* filled in by symbol_filter() */

static int delay_secs = 2;		/* -d: seconds between refreshes */
static int zero;			/* -z: reset counts each refresh */
static int dump_symtab;			/* -D: dump symbol table and exit */
77 | ||
/*
 * Symbols
 */

/* Lowest/highest symbol addresses in kernel_dso; set by parse_symbols(). */
static u64 min_ip;
static u64 max_ip = -1ll;
84 | ||
/*
 * Per-symbol profiling state, stored as the private area in front of
 * each struct symbol in kernel_dso (see dso__sym_priv() and the
 * "(struct symbol *)(syme + 1)" access in print_sym_table()).
 */
struct sym_entry {
	struct rb_node rb_node;		/* node in the per-refresh sort tree */
	struct list_head node;		/* node on active_symbols */
	unsigned long count[MAX_COUNTERS];	/* hits per counter */
	unsigned long snap_count;	/* snapshot of count[0] for sorting */
	double weight;			/* display order, see sym_weight() */
	int skip;			/* don't account samples (idle funcs) */
};
93 | ||
/* Entry selected via --sym-filter, if any. */
struct sym_entry *sym_filter_entry;

/* DSO holding the kernel symbol table; created by parse_symbols(). */
struct dso *kernel_dso;
97 | ||
/*
 * Symbols will be added here in record_ip and will get out
 * after decayed.
 */
static LIST_HEAD(active_symbols);
/* Serializes list membership between the sampling and display threads. */
static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;
104 | ||
105 | /* | |
106 | * Ordering weight: count-1 * count-2 * ... / count-n | |
107 | */ | |
108 | static double sym_weight(const struct sym_entry *sym) | |
109 | { | |
110 | double weight = sym->snap_count; | |
111 | int counter; | |
112 | ||
113 | for (counter = 1; counter < nr_counters-1; counter++) | |
114 | weight *= sym->count[counter]; | |
115 | ||
116 | weight /= (sym->count[counter] + 1); | |
117 | ||
118 | return weight; | |
119 | } | |
120 | ||
/* Sample totals accumulated since the last display refresh. */
static long samples;
static long userspace_samples;
/* ANSI escape: cursor home + clear screen. */
static const char CONSOLE_CLEAR[] = "\e[H\e[2J";
124 | ||
/*
 * Add a symbol to the active list.  Caller must hold active_symbols_lock
 * (see record_ip()), hence the __ prefix.
 */
static void __list_insert_active_sym(struct sym_entry *syme)
{
	list_add(&syme->node, &active_symbols);
}
129 | ||
/* Drop a fully-decayed symbol from the active list (display thread side). */
static void list_remove_active_sym(struct sym_entry *syme)
{
	pthread_mutex_lock(&active_symbols_lock);
	list_del_init(&syme->node);
	pthread_mutex_unlock(&active_symbols_lock);
}
136 | ||
137 | static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se) | |
138 | { | |
139 | struct rb_node **p = &tree->rb_node; | |
140 | struct rb_node *parent = NULL; | |
141 | struct sym_entry *iter; | |
142 | ||
143 | while (*p != NULL) { | |
144 | parent = *p; | |
145 | iter = rb_entry(parent, struct sym_entry, rb_node); | |
146 | ||
147 | if (se->weight > iter->weight) | |
148 | p = &(*p)->rb_left; | |
149 | else | |
150 | p = &(*p)->rb_right; | |
151 | } | |
152 | ||
153 | rb_link_node(&se->rb_node, parent, p); | |
154 | rb_insert_color(&se->rb_node, tree); | |
155 | } | |
156 | ||
/*
 * One display refresh: snapshot and decay the active symbols, sort them
 * by weight into a temporary rbtree, then print a header plus up to
 * print_entries rows of the hottest kernel functions.  Runs on the
 * display thread every delay_secs seconds.
 */
static void print_sym_table(void)
{
	int printed = 0, j;
	int counter;
	float samples_per_sec = samples/delay_secs;
	float ksamples_per_sec = (samples-userspace_samples)/delay_secs;
	float sum_ksamples = 0.0;
	struct sym_entry *syme, *n;
	struct rb_root tmp = RB_ROOT;
	struct rb_node *nd;

	samples = userspace_samples = 0;

	/* Sort the active symbols */
	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	/*
	 * NOTE(review): the list is walked after dropping the lock; the
	 * sampling thread may concurrently insert at the head -- presumably
	 * tolerated since this is display-only.  Confirm intent.
	 */
	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		syme->snap_count = syme->count[0];
		if (syme->snap_count != 0) {
			syme->weight = sym_weight(syme);
			rb_insert_active_sym(&tmp, syme);
			sum_ksamples += syme->snap_count;

			/* Exponentially decay counts (7/8), or zero with -z. */
			for (j = 0; j < nr_counters; j++)
				syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
		} else
			list_remove_active_sym(syme);
	}

	puts(CONSOLE_CLEAR);

	printf(
"------------------------------------------------------------------------------\n");
	printf( "   PerfTop:%8.0f irqs/sec  kernel:%4.1f%% [",
		samples_per_sec,
		100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)));

	/* Single counter: also show its sample period/frequency. */
	if (nr_counters == 1) {
		printf("%Ld", (u64)attrs[0].sample_period);
		if (freq)
			printf("Hz ");
		else
			printf(" ");
	}

	for (counter = 0; counter < nr_counters; counter++) {
		if (counter)
			printf("/");

		printf("%s", event_name(counter));
	}

	printf( "], ");

	if (target_pid != -1)
		printf(" (target_pid: %d", target_pid);
	else
		printf(" (all");

	if (profile_cpu != -1)
		printf(", cpu: %d)\n", profile_cpu);
	else {
		if (target_pid != -1)
			printf(")\n");
		else
			printf(", %d CPUs)\n", nr_cpus);
	}

	printf("------------------------------------------------------------------------------\n\n");

	if (nr_counters == 1)
		printf("             samples    pcnt");
	else
		printf("  weight     samples    pcnt");

	printf("         RIP          kernel function\n"
	       	       "  ______     _______   _____   ________________   _______________\n\n"
	);

	/* Hottest-first walk of the freshly built tree. */
	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node);
		struct symbol *sym = (struct symbol *)(syme + 1);
		char *color = PERF_COLOR_NORMAL;
		double pcnt;

		if (++printed > print_entries || syme->snap_count < count_filter)
			continue;

		pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
					 sum_ksamples));

		/*
		 * We color high-overhead entries in red, mid-overhead
		 * entries in green - and keep the low overhead places
		 * normal:
		 */
		if (pcnt >= 5.0) {
			color = PERF_COLOR_RED;
		} else {
			if (pcnt >= 0.5)
				color = PERF_COLOR_GREEN;
		}

		if (nr_counters == 1)
			printf("%20.2f - ", syme->weight);
		else
			printf("%9.1f %10ld - ", syme->weight, syme->snap_count);

		color_fprintf(stdout, color, "%4.1f%%", pcnt);
		printf(" - %016llx : %s\n", sym->start, sym->name);
	}
}
271 | ||
272 | static void *display_thread(void *arg __used) | |
273 | { | |
274 | struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; | |
275 | int delay_msecs = delay_secs * 1000; | |
276 | ||
277 | printf("PerfTop refresh period: %d seconds\n", delay_secs); | |
278 | ||
279 | do { | |
280 | print_sym_table(); | |
281 | } while (!poll(&stdin_poll, 1, delay_msecs) == 1); | |
282 | ||
283 | printf("key pressed - exiting.\n"); | |
284 | exit(0); | |
285 | ||
286 | return NULL; | |
287 | } | |
288 | ||
/*
 * Tag samples to be skipped: idle routines whose hits would otherwise
 * dominate the profile.  symbol_filter() sets ->skip on matches so
 * record_ip() discards their samples.
 */
static const char *skip_symbols[] = {
	"default_idle",
	"cpu_idle",
	"enter_idle",
	"exit_idle",
	"mwait_idle",
	"ppc64_runlatch_off",
	"pseries_dedicated_idle_sleep",
	NULL
};
300 | ||
/*
 * Callback invoked for each symbol loaded into kernel_dso.
 *
 * Returns 1 to reject bookkeeping symbols (section markers, module
 * init/cleanup), 0 to keep the symbol.  Also marks idle routines with
 * ->skip, and when --sym-filter is given records the address range
 * [filter_start, filter_end) of the requested symbol via the static
 * filter_match state machine:
 *   0  - filter symbol not encountered yet
 *   1  - filter symbol seen; the next symbol's start ends the range
 *  -1  - range recorded (or abandoned as implausibly large)
 */
static int symbol_filter(struct dso *self, struct symbol *sym)
{
	static int filter_match;
	struct sym_entry *syme;
	const char *name = sym->name;
	int i;

	/*
	 * ppc64 uses function descriptors and appends a '.' to the
	 * start of every instruction address. Remove it.
	 */
	if (name[0] == '.')
		name++;

	if (!strcmp(name, "_text") ||
	    !strcmp(name, "_etext") ||
	    !strcmp(name, "_sinittext") ||
	    !strncmp("init_module", name, 11) ||
	    !strncmp("cleanup_module", name, 14) ||
	    strstr(name, "_text_start") ||
	    strstr(name, "_text_end"))
		return 1;

	syme = dso__sym_priv(self, sym);
	for (i = 0; skip_symbols[i]; i++) {
		if (!strcmp(skip_symbols[i], name)) {
			syme->skip = 1;
			break;
		}
	}

	/* The symbol after the filtered one closes the address range. */
	if (filter_match == 1) {
		filter_end = sym->start;
		filter_match = -1;
		if (filter_end - filter_start > 10000) {
			fprintf(stderr,
				"hm, too large filter symbol <%s> - skipping.\n",
				sym_filter);
			fprintf(stderr, "symbol filter start: %016lx\n",
				filter_start);
			fprintf(stderr, "                end: %016lx\n",
				filter_end);
			filter_end = filter_start = 0;
			sym_filter = NULL;
			sleep(1);
		}
	}

	if (filter_match == 0 && sym_filter && !strcmp(name, sym_filter)) {
		filter_match = 1;
		filter_start = sym->start;
	}


	return 0;
}
357 | ||
358 | static int parse_symbols(void) | |
359 | { | |
360 | struct rb_node *node; | |
361 | struct symbol *sym; | |
362 | ||
363 | kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry)); | |
364 | if (kernel_dso == NULL) | |
365 | return -1; | |
366 | ||
367 | if (dso__load_kernel(kernel_dso, NULL, symbol_filter, 1) != 0) | |
368 | goto out_delete_dso; | |
369 | ||
370 | node = rb_first(&kernel_dso->syms); | |
371 | sym = rb_entry(node, struct symbol, rb_node); | |
372 | min_ip = sym->start; | |
373 | ||
374 | node = rb_last(&kernel_dso->syms); | |
375 | sym = rb_entry(node, struct symbol, rb_node); | |
376 | max_ip = sym->end; | |
377 | ||
378 | if (dump_symtab) | |
379 | dso__fprintf(kernel_dso, stderr); | |
380 | ||
381 | return 0; | |
382 | ||
383 | out_delete_dso: | |
384 | dso__delete(kernel_dso); | |
385 | kernel_dso = NULL; | |
386 | return -1; | |
387 | } | |
388 | ||
#define TRACE_COUNT 3	/* NOTE(review): apparently unused in this file */

/*
 * Binary search in the histogram table and record the hit:
 *
 * Resolves ip to a kernel symbol, bumps its per-counter count and puts
 * it on the active list if it is not there yet.  Samples that resolve
 * to a skipped symbol (or to nothing) are subtracted back out of the
 * 'samples' total that process_event() incremented.
 */
static void record_ip(u64 ip, int counter)
{
	struct symbol *sym = dso__find_symbol(kernel_dso, ip);

	if (sym != NULL) {
		struct sym_entry *syme = dso__sym_priv(kernel_dso, sym);

		if (!syme->skip) {
			syme->count[counter]++;
			pthread_mutex_lock(&active_symbols_lock);
			if (list_empty(&syme->node) || !syme->node.next)
				__list_insert_active_sym(syme);
			pthread_mutex_unlock(&active_symbols_lock);
			return;
		}
	}

	samples--;
}
413 | ||
414 | static void process_event(u64 ip, int counter, int user) | |
415 | { | |
416 | samples++; | |
417 | ||
418 | if (user) { | |
419 | userspace_samples++; | |
420 | return; | |
421 | } | |
422 | ||
423 | record_ip(ip, counter); | |
424 | } | |
425 | ||
/* Per-(cpu, counter) view of one mmap'ed sample ring buffer. */
struct mmap_data {
	int counter;		/* counter index this buffer belongs to */
	void *base;		/* mmap base: control page + data pages */
	int mask;		/* data size - 1, for offset wrapping */
	unsigned int prev;	/* our read position (kernel owns the head) */
};
432 | ||
433 | static unsigned int mmap_read_head(struct mmap_data *md) | |
434 | { | |
435 | struct perf_counter_mmap_page *pc = md->base; | |
436 | int head; | |
437 | ||
438 | head = pc->data_head; | |
439 | rmb(); | |
440 | ||
441 | return head; | |
442 | } | |
443 | ||
/* Timestamps of the previous/current mmap_read_counter() invocations. */
struct timeval last_read, this_read;
445 | ||
/*
 * Drain one counter's ring buffer: walk every record between our last
 * read position (md->prev) and the kernel's data_head, handing each
 * sample to process_event().  Records wrapping the buffer end are
 * reassembled into a local copy first.
 */
static void mmap_read_counter(struct mmap_data *md)
{
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	int diff;

	gettimeofday(&this_read, NULL);

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and mess up the samples under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff > md->mask / 2 || diff < 0) {
		struct timeval iv;
		unsigned long msecs;

		timersub(&this_read, &last_read, &iv);
		msecs = iv.tv_sec*1000 + iv.tv_usec/1000;

		fprintf(stderr, "WARNING: failed to keep up with mmap data."
				" Last read %lu msecs ago.\n", msecs);

		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	last_read = this_read;

	for (; old != head;) {
		/* On-the-wire layouts of the record types we consume: */
		struct ip_event {
			struct perf_event_header header;
			u64 ip;
			u32 pid, target_pid;
		};
		struct mmap_event {
			struct perf_event_header header;
			u32 pid, target_pid;
			u64 start;
			u64 len;
			u64 pgoff;
			char filename[PATH_MAX];
		};

		typedef union event_union {
			struct perf_event_header header;
			struct ip_event ip;
			struct mmap_event mmap;
		} event_t;

		event_t *event = (event_t *)&data[old & md->mask];

		event_t event_copy;

		size_t size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &event_copy;

			/* Copy the wrapped record piecewise into event_copy. */
			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &event_copy;
		}

		old += size;

		if (event->header.type == PERF_EVENT_SAMPLE) {
			int user =
	(event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK) == PERF_EVENT_MISC_USER;
			process_event(event->ip.ip, md->counter, user);
		}
	}

	md->prev = old;
}
540 | ||
/* Poll set and ring-buffer descriptors, populated by start_counter(). */
static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
543 | ||
544 | static void mmap_read(void) | |
545 | { | |
546 | int i, counter; | |
547 | ||
548 | for (i = 0; i < nr_cpus; i++) { | |
549 | for (counter = 0; counter < nr_counters; counter++) | |
550 | mmap_read_counter(&mmap_array[i][counter]); | |
551 | } | |
552 | } | |
553 | ||
/* Number of valid entries in event_array. */
int nr_poll;
/* Group-leader fd; reset to -1 per CPU in __cmd_top() when -g is used. */
int group_fd;
556 | ||
/*
 * Open counter 'counter' on CPU slot 'i' (or on target_pid), falling
 * back from hardware cycles to the software cpu-clock counter when no
 * PMU is available, then mmap its sample ring buffer and register the
 * fd in event_array for polling.  Dies on unrecoverable errors.
 */
static void start_counter(int i, int counter)
{
	struct perf_counter_attr *attr;
	unsigned int cpu;

	/* With neither -p nor -C, slot i maps directly to CPU i. */
	cpu = profile_cpu;
	if (target_pid == -1 && profile_cpu == -1)
		cpu = i;

	attr = attrs + counter;

	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
	attr->freq = freq;

try_again:
	fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0);

	if (fd[i][counter] < 0) {
		int err = errno;

		if (err == EPERM)
			die("No permission - are you root?\n");
		/*
		 * If it's cycles then fall back to hrtimer
		 * based cpu-clock-tick sw counter, which
		 * is always available even if no PMU support:
		 */
		if (attr->type == PERF_TYPE_HARDWARE
			&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {

			if (verbose)
				warning(" ... trying to fall back to cpu-clock-ticks\n");

			attr->type = PERF_TYPE_SOFTWARE;
			attr->config = PERF_COUNT_SW_CPU_CLOCK;
			goto try_again;
		}
		printf("\n");
		error("perfcounter syscall returned with %d (%s)\n",
			fd[i][counter], strerror(err));
		die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n");
		exit(-1);
	}
	assert(fd[i][counter] >= 0);
	fcntl(fd[i][counter], F_SETFL, O_NONBLOCK);

	/*
	 * First counter acts as the group leader:
	 */
	if (group && group_fd == -1)
		group_fd = fd[i][counter];

	event_array[nr_poll].fd = fd[i][counter];
	event_array[nr_poll].events = POLLIN;
	nr_poll++;

	/* One control page plus mmap_pages data pages, read-only. */
	mmap_array[i][counter].counter = counter;
	mmap_array[i][counter].prev = 0;
	mmap_array[i][counter].mask = mmap_pages*page_size - 1;
	mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
			PROT_READ, MAP_SHARED, fd[i][counter], 0);
	if (mmap_array[i][counter].base == MAP_FAILED)
		die("failed to mmap with %d (%s)\n", errno, strerror(errno));
}
621 | ||
/*
 * Top-level event loop: open all counters, spawn the display thread,
 * optionally switch to SCHED_FIFO, then drain the mmap buffers forever.
 * The process is actually terminated by display_thread() on a key press,
 * so the final return is never reached in normal operation.
 */
static int __cmd_top(void)
{
	pthread_t thread;
	int i, counter;
	int ret;

	for (i = 0; i < nr_cpus; i++) {
		group_fd = -1;	/* new group leader per CPU */
		for (counter = 0; counter < nr_counters; counter++)
			start_counter(i, counter);
	}

	/* Wait for a minimal set of events before starting the snapshot */
	poll(event_array, nr_poll, 100);

	mmap_read();

	if (pthread_create(&thread, NULL, display_thread, NULL)) {
		printf("Could not create display thread.\n");
		exit(-1);
	}

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			printf("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	while (1) {
		int hits = samples;

		mmap_read();

		/* Idle briefly when the last pass produced no new samples. */
		if (hits == samples)
			ret = poll(event_array, nr_poll, 100);
			/* NOTE(review): ret is assigned but never checked */
	}

	return 0;
}
665 | ||
/* Usage string shown by usage_with_options() on bad invocation. */
static const char * const top_usage[] = {
	"perf top [<options>]",
	NULL
};
670 | ||
671 | static const struct option options[] = { | |
672 | OPT_CALLBACK('e', "event", NULL, "event", | |
673 | "event selector. use 'perf list' to list available events", | |
674 | parse_events), | |
675 | OPT_INTEGER('c', "count", &default_interval, | |
676 | "event period to sample"), | |
677 | OPT_INTEGER('p', "pid", &target_pid, | |
678 | "profile events on existing pid"), | |
679 | OPT_BOOLEAN('a', "all-cpus", &system_wide, | |
680 | "system-wide collection from all CPUs"), | |
681 | OPT_INTEGER('C', "CPU", &profile_cpu, | |
682 | "CPU to profile on"), | |
683 | OPT_INTEGER('m', "mmap-pages", &mmap_pages, | |
684 | "number of mmap data pages"), | |
685 | OPT_INTEGER('r', "realtime", &realtime_prio, | |
686 | "collect data with this RT SCHED_FIFO priority"), | |
687 | OPT_INTEGER('d', "delay", &delay_secs, | |
688 | "number of seconds to delay between refreshes"), | |
689 | OPT_BOOLEAN('D', "dump-symtab", &dump_symtab, | |
690 | "dump the symbol table used for profiling"), | |
691 | OPT_INTEGER('f', "count-filter", &count_filter, | |
692 | "only display functions with more events than this"), | |
693 | OPT_BOOLEAN('g', "group", &group, | |
694 | "put the counters into a counter group"), | |
695 | OPT_STRING('s', "sym-filter", &sym_filter, "pattern", | |
696 | "only display symbols matchig this pattern"), | |
697 | OPT_BOOLEAN('z', "zero", &zero, | |
698 | "zero history across updates"), | |
699 | OPT_INTEGER('F', "freq", &freq, | |
700 | "profile at this frequency"), | |
701 | OPT_INTEGER('E', "entries", &print_entries, | |
702 | "display this many functions"), | |
703 | OPT_BOOLEAN('v', "verbose", &verbose, | |
704 | "be more verbose (show counter open errors, etc)"), | |
705 | OPT_END() | |
706 | }; | |
707 | ||
708 | int cmd_top(int argc, const char **argv, const char *prefix __used) | |
709 | { | |
710 | int counter; | |
711 | ||
712 | page_size = sysconf(_SC_PAGE_SIZE); | |
713 | ||
714 | argc = parse_options(argc, argv, options, top_usage, 0); | |
715 | if (argc) | |
716 | usage_with_options(top_usage, options); | |
717 | ||
718 | if (freq) { | |
719 | default_interval = freq; | |
720 | freq = 1; | |
721 | } | |
722 | ||
723 | /* CPU and PID are mutually exclusive */ | |
724 | if (target_pid != -1 && profile_cpu != -1) { | |
725 | printf("WARNING: PID switch overriding CPU\n"); | |
726 | sleep(1); | |
727 | profile_cpu = -1; | |
728 | } | |
729 | ||
730 | if (!nr_counters) | |
731 | nr_counters = 1; | |
732 | ||
733 | if (delay_secs < 1) | |
734 | delay_secs = 1; | |
735 | ||
736 | parse_symbols(); | |
737 | ||
738 | /* | |
739 | * Fill in the ones not specifically initialized via -c: | |
740 | */ | |
741 | for (counter = 0; counter < nr_counters; counter++) { | |
742 | if (attrs[counter].sample_period) | |
743 | continue; | |
744 | ||
745 | attrs[counter].sample_period = default_interval; | |
746 | } | |
747 | ||
748 | nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); | |
749 | assert(nr_cpus <= MAX_NR_CPUS); | |
750 | assert(nr_cpus >= 0); | |
751 | ||
752 | if (target_pid != -1 || profile_cpu != -1) | |
753 | nr_cpus = 1; | |
754 | ||
755 | return __cmd_top(); | |
756 | } |