]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - tools/perf/util/hist.c
perf hist: Make event__totals per hists
[mirror_ubuntu-artful-kernel.git] / tools / perf / util / hist.c
1 #include "util.h"
2 #include "hist.h"
3 #include "session.h"
4 #include "sort.h"
5 #include <math.h>
6
/*
 * Global callchain rendering parameters shared by every output path in
 * this file.  Defaults: relative graph mode, hide chains below 0.5%.
 */
struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5
};
11
12 static void hist_entry__add_cpumode_count(struct hist_entry *self,
13 unsigned int cpumode, u64 count)
14 {
15 switch (cpumode) {
16 case PERF_RECORD_MISC_KERNEL:
17 self->count_sys += count;
18 break;
19 case PERF_RECORD_MISC_USER:
20 self->count_us += count;
21 break;
22 case PERF_RECORD_MISC_GUEST_KERNEL:
23 self->count_guest_sys += count;
24 break;
25 case PERF_RECORD_MISC_GUEST_USER:
26 self->count_guest_us += count;
27 break;
28 default:
29 break;
30 }
31 }
32
33 /*
34 * histogram, sorted on item, collects counts
35 */
36
37 static struct hist_entry *hist_entry__new(struct hist_entry *template)
38 {
39 size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_node) : 0;
40 struct hist_entry *self = malloc(sizeof(*self) + callchain_size);
41
42 if (self != NULL) {
43 *self = *template;
44 if (symbol_conf.use_callchain)
45 callchain_init(self->callchain);
46 }
47
48 return self;
49 }
50
51 static void hists__inc_nr_entries(struct hists *self, struct hist_entry *entry)
52 {
53 if (entry->ms.sym && self->max_sym_namelen < entry->ms.sym->namelen)
54 self->max_sym_namelen = entry->ms.sym->namelen;
55 ++self->nr_entries;
56 }
57
/*
 * Add one sample to the histogram: find the entry matching @al under the
 * configured sort keys and accumulate @count into it, or allocate and
 * link a new entry built from @al.  The per-cpumode counters are updated
 * either way.  Returns the entry, or NULL on allocation failure.
 */
struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 count)
{
	struct rb_node **p = &self->entries.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	/* On-stack template; copied into a heap entry only if needed. */
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.ip	= al->addr,
		.level	= al->level,
		.count = count,
		.parent = sym_parent,
	};
	int cmp;

	/* Standard rbtree descent, ordered by hist_entry__cmp(). */
	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			/* Same sort keys: just accumulate into the hit. */
			he->count += count;
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(&entry);
	if (!he)
		return NULL;
	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &self->entries);
	hists__inc_nr_entries(self, he);
out:
	hist_entry__add_cpumode_count(he, al->cpumode, count);
	return he;
}
105
106 int64_t
107 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
108 {
109 struct sort_entry *se;
110 int64_t cmp = 0;
111
112 list_for_each_entry(se, &hist_entry__sort_list, list) {
113 cmp = se->se_cmp(left, right);
114 if (cmp)
115 break;
116 }
117
118 return cmp;
119 }
120
121 int64_t
122 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
123 {
124 struct sort_entry *se;
125 int64_t cmp = 0;
126
127 list_for_each_entry(se, &hist_entry__sort_list, list) {
128 int64_t (*f)(struct hist_entry *, struct hist_entry *);
129
130 f = se->se_collapse ?: se->se_cmp;
131
132 cmp = f(left, right);
133 if (cmp)
134 break;
135 }
136
137 return cmp;
138 }
139
/* Release a hist_entry allocated by hist_entry__new(). */
void hist_entry__free(struct hist_entry *he)
{
	free(he);
}
144
145 /*
146 * collapse the histogram
147 */
148
/*
 * Insert @he into @root keyed by the collapse comparator.  Returns true
 * when @he was linked into the tree, false when an equal entry already
 * existed -- in that case the counts are merged and @he is FREED, so
 * the caller must not touch it afterwards.
 */
static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			/* Duplicate under the collapsed keys: merge and drop. */
			iter->count += he->count;
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);
	return true;
}
178
/*
 * Rebuild self->entries merging entries that compare equal under the
 * collapse comparators.  No-op unless some sort key requires collapsing
 * (sort__need_collapse).  nr_entries and max_sym_namelen are recomputed
 * from the surviving entries.
 */
void hists__collapse_resort(struct hists *self)
{
	struct rb_root tmp;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	tmp = RB_ROOT;
	next = rb_first(&self->entries);
	self->nr_entries = 0;
	self->max_sym_namelen = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		/* Grab the successor before unlinking n from the old tree. */
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &self->entries);
		/* false means n was merged (and freed); don't count it. */
		if (collapse__insert_entry(&tmp, n))
			hists__inc_nr_entries(self, n);
	}

	self->entries = tmp;
}
204
205 /*
206 * reverse the map, sort on count.
207 */
208
/*
 * Link @he into the output tree @entries ordered by count, descending.
 * The entry's callchain is sorted/pruned first (chains with fewer than
 * @min_callchain_hits hits are filtered by the configured sort routine)
 * so the tree holds ready-to-print chains.
 */
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	/* Bigger counts go to the left so rb_first() yields the hottest. */
	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->count > iter->count)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
234
/*
 * Re-sort self->entries into output order (by count) by migrating every
 * entry into a fresh tree via __hists__insert_output_entry().  Also
 * recomputes nr_entries/max_sym_namelen and derives the minimum
 * callchain hit threshold from callchain_param.min_percent.
 */
void hists__output_resort(struct hists *self)
{
	struct rb_root tmp;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = self->stats.total * (callchain_param.min_percent / 100);

	tmp = RB_ROOT;
	next = rb_first(&self->entries);

	self->nr_entries = 0;
	self->max_sym_namelen = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		/* Fetch the successor before unlinking n. */
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &self->entries);
		__hists__insert_output_entry(&tmp, n, min_callchain_hits);
		hists__inc_nr_entries(self, n);
	}

	self->entries = tmp;
}
261
/*
 * Emit the fixed gutter followed by @left_margin extra spaces; returns
 * the number of characters written.
 */
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int written = fprintf(fp, " ");
	int col;

	for (col = 0; col < left_margin; col++)
		written += fprintf(fp, " ");

	return written;
}
272
/*
 * Print one separator line of the callchain graph: the left margin,
 * then for each of @depth levels a "| " connector when that level's bit
 * is set in @depth_mask, padding otherwise.  Returns characters written.
 */
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	size_t written = callchain__fprintf_left_margin(fp, left_margin);
	int level;

	for (level = 0; level < depth; level++) {
		const char *seg = (depth_mask & (1 << level)) ? "| " : " ";

		written += fprintf(fp, "%s", seg);
	}

	return written + fprintf(fp, "\n");
}
289
/*
 * Print one callchain-list line of the graph: the depth connectors, an
 * optional colored "--XX.XX%--" percentage on the first line (@count ==
 * 0) of the deepest level, then the symbol name or the raw address when
 * unresolved.  @hits out of @total_samples drives the percentage.
 * Returns the number of characters written.
 */
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int count,
				     u64 total_samples, int hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!count && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", " ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}
319
/*
 * Fake symbol + callchain_list used to print the "[...]" line that
 * stands in for filtered-out remaining hits in relative graph mode.
 * Lazily allocated by init_rem_hits(), freed by hists__fprintf().
 */
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;
322
/*
 * Allocate the placeholder "[...]" symbol used by the graph printer.
 * The extra 6 bytes hold "[...]" plus its NUL terminator -- assumes
 * struct symbol ends in a trailing name[] array (TODO confirm against
 * symbol.h).  On OOM, rem_sq_bracket stays NULL and the graph printer
 * simply omits the summary line.
 */
static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}
334
/*
 * Recursively print @self's children as an indented ASCII graph.  In
 * CHAIN_GRAPH_REL mode percentages are relative to the parent's
 * children_hit, otherwise to @total_samples.  @depth_mask carries one
 * bit per ancestor level that still needs a '|' connector.  Hits not
 * accounted for by any printed child are summarized by a trailing
 * "[...]" (rem_hits) line in relative mode.  Returns characters written.
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 new_total;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = self->children_hit;
	else
		new_total = total_samples;

	/* Whatever the children don't account for was filtered out. */
	remaining = new_total;

	node = rb_first(&self->rb_root);
	while (node) {
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      new_total,
						      cumul,
						      left_margin);
		}
		ret += __callchain__fprintf_graph(fp, child, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		/* Honor the per-level print limit. */
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != new_total) {

		/* init_rem_hits() failed: no placeholder to print. */
		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));

		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, new_total,
					      remaining, left_margin);
	}

	return ret;
}
413
/*
 * Print a root callchain in graph mode: the root's own call list first,
 * behind a "---" connector (the leading entry is skipped when sorting
 * by symbol, since the hist line already shows that symbol), then the
 * children via __callchain__fprintf_graph().  Returns characters written.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
				       u64 total_samples, int left_margin)
{
	struct callchain_list *chain;
	bool printed = false;
	int i = 0;
	int ret = 0;
	u32 entries_printed = 0;

	list_for_each_entry(chain, &self->val, list) {
		if (!i++ && sort__first_dimension == SORT_SYM)
			continue;

		if (!printed) {
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "|\n");
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "---");

			/* Subsequent lines align under the "---" connector. */
			left_margin += 3;
			printed = true;
		} else
			ret += callchain__fprintf_left_margin(fp, left_margin);

		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

	return ret;
}
451
/*
 * Print a callchain in flat mode.  Recurses on the parent first so the
 * frames come out root-to-leaf; entries with ip >= PERF_CONTEXT_MAX
 * (context markers, presumably -- confirm against perf_event.h) are
 * skipped.  A NULL node contributes nothing.
 */
static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
				      u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	ret += callchain__fprintf_flat(fp, self->parent, total_samples);


	list_for_each_entry(chain, &self->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n",
				       (void *)(long)chain->ip);
	}

	return ret;
}
476
/*
 * Print every top-level callchain attached to @self (already ordered in
 * sorted_chain by the output resort), dispatching on the configured
 * callchain mode.  Percentages are relative to @total_samples.  Stops
 * after callchain_param.print_limit chains.  Returns characters written.
 */
static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
					    u64 total_samples, int left_margin)
{
	struct rb_node *rb_node;
	struct callchain_node *chain;
	size_t ret = 0;
	u32 entries_printed = 0;

	rb_node = rb_first(&self->sorted_chain);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;
		switch (callchain_param.mode) {
		case CHAIN_FLAT:
			ret += percent_color_fprintf(fp, " %6.2f%%\n",
						     percent);
			ret += callchain__fprintf_flat(fp, chain, total_samples);
			break;
		case CHAIN_GRAPH_ABS: /* Falldown */
		case CHAIN_GRAPH_REL:
			ret += callchain__fprintf_graph(fp, chain, total_samples,
							left_margin);
			/* fall through -- harmless, next cases just break */
		case CHAIN_NONE:
		default:
			break;
		}
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;
		rb_node = rb_next(rb_node);
	}

	return ret;
}
513
/*
 * Format one histogram line into @s (@size bytes): the overhead
 * percentage (colored when @color; raw count when the total is 0),
 * optional cpu-utilization and sample-count columns, the diff and
 * displacement columns when comparing against @pair_hists, then one
 * column per non-elided sort key.  Returns the number of characters
 * written (snprintf semantics), or 0 when the entry is suppressed by
 * symbol_conf.exclude_other.
 */
int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
			 struct hists *pair_hists, bool show_displacement,
			 long displacement, bool color, u64 session_total)
{
	struct sort_entry *se;
	u64 count, total, count_sys, count_us, count_guest_sys, count_guest_us;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !self->parent)
		return 0;

	/*
	 * When diffing, the leading columns describe the baseline (the
	 * paired entry, 0 when unmatched); otherwise this entry itself.
	 */
	if (pair_hists) {
		count = self->pair ? self->pair->count : 0;
		total = pair_hists->stats.total;
		count_sys = self->pair ? self->pair->count_sys : 0;
		count_us = self->pair ? self->pair->count_us : 0;
		count_guest_sys = self->pair ? self->pair->count_guest_sys : 0;
		count_guest_us = self->pair ? self->pair->count_guest_us : 0;
	} else {
		count = self->count;
		total = session_total;
		count_sys = self->count_sys;
		count_us = self->count_us;
		count_guest_sys = self->count_guest_sys;
		count_guest_us = self->count_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : " %6.2f%%",
						     (count * 100.0) / total);
		else
			ret = snprintf(s, size, sep ? "%.2f" : " %6.2f%%",
				       (count * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (count_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (count_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (count_guest_sys * 100.0) /
							      total);
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (count_guest_us * 100.0) /
							      total);
			}
		}
	} else
		/*
		 * No total to divide by: print the raw count.
		 * NOTE(review): "%lld" with a u64 assumes u64 is
		 * unsigned long long here -- confirm against types.h.
		 */
		ret = snprintf(s, size, sep ? "%lld" : "%12lld ", count);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%lld", *sep, count);
		else
			ret += snprintf(s + ret, size - ret, "%11lld", count);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (count * 100.0) / total;
		if (session_total > 0)
			new_percent = (self->count * 100.0) / session_total;

		diff = new_percent - old_percent;

		/* Only show deltas of at least one hundredth of a percent. */
		if (fabs(diff) >= 0.01)
			snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			snprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += snprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				snprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				snprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += snprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	/* One column per active sort key, separated by @sep or a space. */
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += snprintf(s + ret, size - ret, "%s", sep ?: " ");
		ret += se->se_snprintf(self, s + ret, size - ret,
				       se->se_width ? *se->se_width : 0);
	}

	return ret;
}
625
626 int hist_entry__fprintf(struct hist_entry *self, struct hists *pair_hists,
627 bool show_displacement, long displacement, FILE *fp,
628 u64 session_total)
629 {
630 char bf[512];
631 hist_entry__snprintf(self, bf, sizeof(bf), pair_hists,
632 show_displacement, displacement,
633 true, session_total);
634 return fprintf(fp, "%s\n", bf);
635 }
636
/*
 * Print @self's callchains, computing the left margin first: when the
 * leading sort column is the comm, indent the chain so it lines up
 * under that column (column width minus the comm's printed length).
 */
static size_t hist_entry__fprintf_callchain(struct hist_entry *self, FILE *fp,
					    u64 session_total)
{
	int left_margin = 0;

	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);
		left_margin = se->se_width ? *se->se_width : 0;
		left_margin -= thread__comm_len(self->thread);
	}

	return hist_entry_callchain__fprintf(fp, self, session_total,
					     left_margin);
}
652
/*
 * Print the whole histogram to @fp: a header naming every column, a
 * dotted underline (omitted when a field separator is configured), then
 * one formatted line per entry -- plus its callchain when callchains
 * are enabled -- in tree order.  @pair switches on the baseline/delta
 * columns used by perf diff.  Returns the accumulated character count.
 * NOTE(review): some header fprintf() results are discarded while
 * others are added to ret -- the accounting looks inconsistent, but it
 * is preserved here.
 */
size_t hists__fprintf(struct hists *self, struct hists *pair,
		      bool show_displacement, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	char *col_width = symbol_conf.col_width_list_str;

	/* Set up the "[...]" placeholder used by graph callchains. */
	init_rem_hits();

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs(" Samples ", fp);
	}

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, " sys ");
			ret += fprintf(fp, " us ");
			if (perf_guest) {
				ret += fprintf(fp, " guest sys ");
				ret += fprintf(fp, " guest us ");
			}
		}
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, " Delta ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	/*
	 * Sort-key column headers.  Without a separator, each column is
	 * padded to max(header width, configured width); the negotiated
	 * width is written back into *se->se_width so the entry lines
	 * printed below line up with the header.
	 */
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (se->se_width) {
			if (symbol_conf.col_width_list_str) {
				/* Consume the next entry of the comma-separated width list. */
				if (col_width) {
					*se->se_width = atoi(col_width);
					col_width = strchr(col_width, ',');
					if (col_width)
						++col_width;
				}
			}
			width = *se->se_width = max(*se->se_width, width);
		}
		fprintf(fp, " %*s", width, se->se_header);
	}
	fprintf(fp, "\n");

	if (sep)
		goto print_entries;

	/* Dotted underline matching the header columns above. */
	fprintf(fp, "# ........");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, " ");
		if (se->se_width)
			width = *se->se_width;
		else
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n#\n");

print_entries:
	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		/*
		 * Displacement: how far the paired (baseline) entry moved
		 * relative to this entry's position in the output order.
		 */
		if (show_displacement) {
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
						(long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, pair, show_displacement,
					   displacement, fp, self->stats.total);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, fp, self->stats.total);

		/* Debugging aid: dump the thread's maps for unresolved entries. */
		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	/*
	 * NOTE(review): rem_sq_bracket is freed but not NULLed, so
	 * rem_hits.ms.sym dangles until the next init_rem_hits() call.
	 */
	free(rem_sq_bracket);

	return ret;
}
788
/* Bit positions in hist_entry->filtered: why an entry is hidden. */
enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
};
793
/*
 * Apply a dso filter: mark entries not mapped to @dso (NULL accepts
 * everything) with HIST_FILTER__DSO, and rebuild nr_entries,
 * stats.total and max_sym_namelen from the entries left completely
 * unfiltered (i.e. also passing any thread filter).
 */
void hists__filter_by_dso(struct hists *self, const struct dso *dso)
{
	struct rb_node *nd;

	self->nr_entries = self->stats.total = 0;
	self->max_sym_namelen = 0;

	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
			h->filtered |= (1 << HIST_FILTER__DSO);
			continue;
		}

		h->filtered &= ~(1 << HIST_FILTER__DSO);
		/* Only count entries passing every active filter. */
		if (!h->filtered) {
			++self->nr_entries;
			self->stats.total += h->count;
			if (h->ms.sym &&
			    self->max_sym_namelen < h->ms.sym->namelen)
				self->max_sym_namelen = h->ms.sym->namelen;
		}
	}
}
822
823 void hists__filter_by_thread(struct hists *self, const struct thread *thread)
824 {
825 struct rb_node *nd;
826
827 self->nr_entries = self->stats.total = 0;
828 self->max_sym_namelen = 0;
829
830 for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
831 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
832
833 if (thread != NULL && h->thread != thread) {
834 h->filtered |= (1 << HIST_FILTER__THREAD);
835 continue;
836 }
837 h->filtered &= ~(1 << HIST_FILTER__THREAD);
838 if (!h->filtered) {
839 ++self->nr_entries;
840 self->stats.total += h->count;
841 if (h->ms.sym &&
842 self->max_sym_namelen < h->ms.sym->namelen)
843 self->max_sym_namelen = h->ms.sym->namelen;
844 }
845 }
846 }
847
/*
 * Allocate the per-symbol annotation histogram: a sym_hist header plus
 * one u64 counter per byte of the symbol's address range (end - start).
 * Counters start zeroed.  Returns 0 on success, -1 on OOM.
 */
static int symbol__alloc_hist(struct symbol *self)
{
	struct sym_priv *priv = symbol__priv(self);
	const int size = (sizeof(*priv->hist) +
			  (self->end - self->start) * sizeof(u64));

	priv->hist = zalloc(size);
	return priv->hist == NULL ? -1 : 0;
}
857
/*
 * Record one sample at @ip for annotation: lazily allocate the symbol's
 * address histogram, then bump the counter at ip's offset within the
 * symbol.  Returns 0 (also for unresolved or out-of-range ips) or
 * -ENOMEM when the histogram cannot be allocated.
 */
int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
{
	unsigned int sym_size, offset;
	struct symbol *sym = self->ms.sym;
	struct sym_priv *priv;
	struct sym_hist *h;

	/* Nothing to annotate without a resolved symbol and map. */
	if (!sym || !self->ms.map)
		return 0;

	priv = symbol__priv(sym);
	if (priv->hist == NULL && symbol__alloc_hist(sym) < 0)
		return -ENOMEM;

	sym_size = sym->end - sym->start;
	offset = ip - sym->start;

	pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));

	/* ip outside the symbol (e.g. stale map): silently ignore. */
	if (offset >= sym_size)
		return 0;

	h = priv->hist;
	h->sum++;
	h->ip[offset]++;

	pr_debug3("%#Lx %s: count++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start,
		  self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]);
	return 0;
}
888
889 static struct objdump_line *objdump_line__new(s64 offset, char *line)
890 {
891 struct objdump_line *self = malloc(sizeof(*self));
892
893 if (self != NULL) {
894 self->offset = offset;
895 self->line = line;
896 }
897
898 return self;
899 }
900
/* Free an objdump_line together with the line text it owns. */
void objdump_line__free(struct objdump_line *self)
{
	free(self->line);
	free(self);
}
906
/* Append @line to the tail of @head, preserving objdump output order. */
static void objdump__add_line(struct list_head *head, struct objdump_line *line)
{
	list_add_tail(&line->node, head);
}
911
/*
 * Return the first line after @pos carrying a real instruction offset
 * (offset >= 0), or NULL when no such line remains in @head.
 */
struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
					       struct objdump_line *pos)
{
	list_for_each_entry_continue(pos, head, node)
		if (pos->offset >= 0)
			return pos;

	return NULL;
}
921
922 static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
923 struct list_head *head)
924 {
925 struct symbol *sym = self->ms.sym;
926 struct objdump_line *objdump_line;
927 char *line = NULL, *tmp, *tmp2, *c;
928 size_t line_len;
929 s64 line_ip, offset = -1;
930
931 if (getline(&line, &line_len, file) < 0)
932 return -1;
933
934 if (!line)
935 return -1;
936
937 while (line_len != 0 && isspace(line[line_len - 1]))
938 line[--line_len] = '\0';
939
940 c = strchr(line, '\n');
941 if (c)
942 *c = 0;
943
944 line_ip = -1;
945
946 /*
947 * Strip leading spaces:
948 */
949 tmp = line;
950 while (*tmp) {
951 if (*tmp != ' ')
952 break;
953 tmp++;
954 }
955
956 if (*tmp) {
957 /*
958 * Parse hexa addresses followed by ':'
959 */
960 line_ip = strtoull(tmp, &tmp2, 16);
961 if (*tmp2 != ':')
962 line_ip = -1;
963 }
964
965 if (line_ip != -1) {
966 u64 start = map__rip_2objdump(self->ms.map, sym->start);
967 offset = line_ip - start;
968 }
969
970 objdump_line = objdump_line__new(offset, line);
971 if (objdump_line == NULL) {
972 free(line);
973 return -1;
974 }
975 objdump__add_line(head, objdump_line);
976
977 return 0;
978 }
979
/*
 * Disassemble the entry's symbol by running objdump over its dso and
 * parsing each output line into @head.  Kernel dsos cannot be annotated
 * without a vmlinux image: warn once per dso and fail.  Returns 0 on
 * success, -1 on error.
 */
int hist_entry__annotate(struct hist_entry *self, struct list_head *head)
{
	struct symbol *sym = self->ms.sym;
	struct map *map = self->ms.map;
	struct dso *dso = map->dso;
	const char *filename = dso->long_name;
	char command[PATH_MAX * 2];
	FILE *file;
	u64 len;

	if (!filename)
		return -1;

	if (dso->origin == DSO__ORIG_KERNEL) {
		if (dso->annotate_warned)
			return 0;
		dso->annotate_warned = 1;
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path:\n", sym->name);
		vmlinux_path__fprintf(stderr);
		return -1;
	}

	pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
		 filename, sym->name, map->unmap_ip(map, sym->start),
		 map->unmap_ip(map, sym->end));

	/* NOTE(review): len is computed but never used. */
	len = sym->end - sym->start;

	pr_debug("annotating [%p] %30s : [%p] %30s\n",
		 dso, dso->long_name, sym, sym->name);

	/*
	 * NOTE(review): @filename is interpolated unquoted into a shell
	 * pipeline; a path containing spaces or shell metacharacters
	 * breaks (or subverts) the command.
	 */
	snprintf(command, sizeof(command),
		 "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s|expand",
		 map__rip_2objdump(map, sym->start),
		 map__rip_2objdump(map, sym->end),
		 filename, filename);

	pr_debug("Executing: %s\n", command);

	file = popen(command, "r");
	if (!file)
		return -1;

	/* Consume every objdump line until EOF or a parse/alloc failure. */
	while (!feof(file))
		if (hist_entry__parse_objdump_line(self, file, head) < 0)
			break;

	pclose(file);
	return 0;
}
1031
1032 void hists__inc_nr_events(struct hists *self, u32 type)
1033 {
1034 ++self->hists.stats.nr_events[0];
1035 ++self->hists.stats.nr_events[type];
1036 }
1037
1038 size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
1039 {
1040 int i;
1041 size_t ret = 0;
1042
1043 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
1044 if (!event__name[i])
1045 continue;
1046 ret += fprintf(fp, "%10s events: %10d\n",
1047 event__name[i], self->stats.nr_events[i]);
1048 }
1049
1050 return ret;
1051 }