/* tools/perf/util/hist.c */
#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};
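
/*
 * These hist_filter values are bit indices into hist_entry->filtered: an
 * entry is visible only while all of its filter bits are clear.  See
 * hists__remove_entry_filter() and the hists__filter_entry_by_*()
 * helpers below.
 */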

struct callchain_param callchain_param = {
	.mode = CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order = ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

static void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

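/*
 * BITS_PER_LONG / 4 is the number of hex digits needed to print an
 * unresolved address (16 on 64-bit), used as a fallback width when a
 * sample has no symbol or DSO name to size the column by.
 */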
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

static void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
	else
		hists__set_unres_dso_col_len(hists, HISTC_DSO);

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->branch_info) {
		int symlen;
		/*
		 * +4 accounts for the '[x] ' priv level info
		 * +2 accounts for the 0x prefix on raw addresses
		 */
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}
}

static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->period_guest_us += period;
		break;
	default:
		break;
	}
}

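/*
 * Exponential decay, used by live modes such as perf top: each call
 * scales an entry's period and event count by 7/8, so e.g. a period of
 * 1024 decays 1024 -> 896 -> 784 -> 686, and an entry that stops getting
 * new samples eventually reaches zero and can be reclaimed.
 */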
static void hist_entry__decay(struct hist_entry *he)
{
	he->period = (he->period * 7) / 8;
	he->nr_events = (he->nr_events * 7) / 8;
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->period;

	return he->period == 0;
}

static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here
		 * in case it gets new samples; we'll eventually free it when
		 * the user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */
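/*
 * Entries flow through three trees: new samples land in the rbtree
 * currently pointed at by hists->entries_in (keyed by hist_entry__cmp()),
 * are merged into hists->entries_collapsed by the collapse pass, and are
 * finally re-sorted by period into hists->entries for output.  A rough
 * sketch of the usual call order, assuming an already resolved
 * addr_location al and a sample period:
 *
 *	he = __hists__add_entry(hists, &al, parent, period);
 *	...
 *	hists__collapse_resort(hists);
 *	hists__output_resort(hists);
 *	hists__fprintf(hists, NULL, false, true, 0, 0, stdout);
 *
 * When callchains are in use, hist_entry__new() below over-allocates so
 * that the callchain root lands in the flexible tail of the hist_entry.
 */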

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;
		he->nr_events = 1;
		if (he->ms.map)
			he->ms.map->referenced = true;
		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);
	}

	return he;
}

static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__cmp(entry, he);

		if (!cmp) {
			he->period += period;
			++he->nr_events;

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.ms = {
			.map = bi->to.map,
			.sym = bi->to.sym,
		},
		.cpu = al->cpu,
		.ip = bi->to.addr,
		.level = al->level,
		.period = period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
	};

	return add_hist_entry(self, &entry, al, period);
}

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.cpu = al->cpu,
		.ip = al->addr,
		.level = al->level,
		.period = period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
	};

	return add_hist_entry(self, &entry, al, period);
}

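/*
 * hist_entry__cmp() orders entries on the configured sort keys;
 * hist_entry__collapse() prefers a sort entry's se_collapse method when
 * one is set, so entries that differ for display purposes can still be
 * merged by the collapse pass.
 */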
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->period += he->period;
			iter->nr_events += he->nr_events;
			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&hists->callchain_cursor);
				callchain_merge(&hists->callchain_cursor, iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

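/*
 * hists->entries_in_array[] holds two input trees.  Under hists->lock the
 * collapse pass grabs the current one and flips hists->entries_in to the
 * other, so new entries can keep arriving while the tree just taken is
 * drained into the collapsed tree.
 */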
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

void hists__collapse_resort(struct hists *hists)
{
	return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
	return __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */
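/*
 * Note the inverted comparison below: entries with a bigger period go
 * left, so an in-order walk from rb_first() visits the hottest entries
 * first.
 */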

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->period > iter->period)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

void hists__output_resort(struct hists *hists)
{
	return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
	return __hists__output_resort(hists, true);
}

static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, " ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

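/*
 * depth_mask carries one bit per callchain depth: a set bit at level i
 * means an ancestor at that depth still has siblings pending, so a '|'
 * connector is printed in that column, while a clear bit prints blanks.
 */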
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}

static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, u64 hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", " ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "0x%0" PRIx64 "\n", chain->ip);

	return ret;
}

static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

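/*
 * struct symbol keeps its name in a flexible tail, so the extra 6 bytes
 * below cover the "[...]" placeholder name plus its NUL terminator.
 */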
static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth, except when there
		 * are remaining filtered hits, which will supersede the
		 * last child.
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the old depth mask for the line separator,
		 * to keep the level link until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      cumul,
						      left_margin);
		}

		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {

		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      remaining, left_margin);
	}

	return ret;
}

static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;

	/*
	 * If we have a single callchain root, don't bother printing
	 * its percentage (100% in fractal mode, and the same percentage
	 * as the hist entry in graph mode). This also avoids one level
	 * of column.
	 */
	node = rb_first(root);
	if (node && !rb_next(node)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same
			 * as the symbol. No need to print it, otherwise it
			 * appears displayed twice.
			 */
			if (!i++ && sort__first_dimension == SORT_SYM)
				continue;
			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			if (chain->ms.sym)
				ret += fprintf(fp, " %s\n", chain->ms.sym->name);
			else
				ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		root = &cnode->rb_root;
	}

	return __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
}

static size_t __callchain__fprintf_flat(FILE *fp,
					struct callchain_node *self,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	ret += __callchain__fprintf_flat(fp, self->parent, total_samples);

	list_for_each_entry(chain, &self->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n",
				       (void *)(long)chain->ip);
	}

	return ret;
}

static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct rb_node *rb_node;
	struct callchain_node *chain;

	rb_node = rb_first(self);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;

		ret += percent_color_fprintf(fp, " %6.2f%%\n", percent);
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, he->period,
						left_margin);
		break;
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						left_margin);
		break;
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s,
				     size_t size, struct hists *pair_hists,
				     bool show_displacement, long displacement,
				     bool color, u64 total_period)
{
	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
	u64 nr_events;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	if (pair_hists) {
		period = he->pair ? he->pair->period : 0;
		nr_events = he->pair ? he->pair->nr_events : 0;
		total = pair_hists->stats.total_period;
		period_sys = he->pair ? he->pair->period_sys : 0;
		period_us = he->pair ? he->pair->period_us : 0;
		period_guest_sys = he->pair ? he->pair->period_guest_sys : 0;
		period_guest_us = he->pair ? he->pair->period_guest_us : 0;
	} else {
		period = he->period;
		nr_events = he->nr_events;
		total = total_period;
		period_sys = he->period_sys;
		period_us = he->period_us;
		period_guest_sys = he->period_guest_sys;
		period_guest_us = he->period_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : " %6.2f%%",
						     (period * 100.0) / total);
		else
			ret = scnprintf(s, size, sep ? "%.2f" : " %6.2f%%",
					(period * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (period_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (period_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (period_guest_sys * 100.0) /
							      total);
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (period_guest_us * 100.0) /
							      total);
			}
		}
	} else
		ret = scnprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
		else
			ret += scnprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
		else
			ret += scnprintf(s + ret, size - ret, " %12" PRIu64, period);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (period * 100.0) / total;
		if (total_period > 0)
			new_percent = (he->period * 100.0) / total_period;

		diff = new_percent - old_percent;

		if (fabs(diff) >= 0.01)
			scnprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			scnprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += scnprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				scnprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				scnprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += scnprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	return ret;
}

int hist_entry__snprintf(struct hist_entry *he, char *s, size_t size,
			 struct hists *hists)
{
	const char *sep = symbol_conf.field_sep;
	struct sort_entry *se;
	int ret = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += scnprintf(s + ret, size - ret, "%s", sep ?: " ");
		ret += se->se_snprintf(he, s + ret, size - ret,
				       hists__col_len(hists, se->se_width_idx));
	}

	return ret;
}

static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       struct hists *hists, struct hists *pair_hists,
			       bool show_displacement, long displacement,
			       u64 total_period, FILE *fp)
{
	char bf[512];
	int ret;

	if (size == 0 || size > sizeof(bf))
		size = sizeof(bf);

	ret = hist_entry__pcnt_snprintf(he, bf, size, pair_hists,
					show_displacement, displacement,
					true, total_period);
	hist_entry__snprintf(he, bf + ret, size - ret, hists);
	return fprintf(fp, "%s\n", bf);
}

static size_t hist_entry__fprintf_callchain(struct hist_entry *he,
					    struct hists *hists,
					    u64 total_period, FILE *fp)
{
	int left_margin = 0;

	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);
		left_margin = hists__col_len(hists, se->se_width_idx);
		left_margin -= thread__comm_len(he->thread);
	}

	return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
}

size_t hists__fprintf(struct hists *hists, struct hists *pair,
		      bool show_displacement, bool show_header, int max_rows,
		      int max_cols, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	u64 total_period;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	const char *col_width = symbol_conf.col_width_list_str;
	int nr_rows = 0;

	init_rem_hits();

	if (!show_header)
		goto print_entries;

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, " sys ");
			ret += fprintf(fp, " us ");
			if (perf_guest) {
				ret += fprintf(fp, " guest sys ");
				ret += fprintf(fp, " guest us ");
			}
		}
	}

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs(" Samples ", fp);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += fprintf(fp, "%cPeriod", *sep);
		else
			ret += fprintf(fp, " Period ");
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, " Delta ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (symbol_conf.col_width_list_str) {
			if (col_width) {
				hists__set_col_len(hists, se->se_width_idx,
						   atoi(col_width));
				col_width = strchr(col_width, ',');
				if (col_width)
					++col_width;
			}
		}
		if (!hists__new_col_len(hists, se->se_width_idx, width))
			width = hists__col_len(hists, se->se_width_idx);
		fprintf(fp, " %*s", width, se->se_header);
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	if (sep)
		goto print_entries;

	fprintf(fp, "# ........");
	if (symbol_conf.show_cpu_utilization)
		fprintf(fp, " ....... .......");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (symbol_conf.show_total_period)
		fprintf(fp, " ............");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, " ");
		width = hists__col_len(hists, se->se_width_idx);
		if (width == 0)
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	fprintf(fp, "#\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

print_entries:
	total_period = hists->stats.total_period;

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (h->filtered)
			continue;

		if (show_displacement) {
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
						(long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement,
					   displacement, total_period, fp);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, hists, total_period, fp);
		if (max_rows && ++nr_rows >= max_rows)
			goto out;

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}
out:
	free(rem_sq_bracket);

	return ret;
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct sort_entry *se;
	int ret = 9;	/* total % */

	if (symbol_conf.show_cpu_utilization) {
		ret += 7;	/* count_sys % */
		ret += 6;	/* count_us % */
		if (perf_guest) {
			ret += 13;	/* count_guest_sys % */
			ret += 12;	/* count_guest_us % */
		}
	}

	if (symbol_conf.show_nr_samples)
		ret += 11;

	if (symbol_conf.show_total_period)
		ret += 13;

	list_for_each_entry(se, &hist_entry__sort_list, list)
		if (!se->elide)
			ret += 2 + hists__col_len(hists, se->se_width_idx);

	if (verbose) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

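/*
 * Called by the hists__filter_by_*() walkers below after they zero the
 * entry and period totals: clearing this filter bit adds the entry's
 * counts back, but only if no other filter still applies to it.
 */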
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	++hists->stats.nr_events[0];
	++hists->stats.nr_events[type];
}

size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (hists->stats.nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       hists->stats.nr_events[i]);
	}

	return ret;
}