#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

struct callchain_param callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5
};

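/*
 * Credit @period to the bucket matching the sample's cpumode (host
 * kernel/user, guest kernel/user). Unknown modes are ignored.
 */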
static void hist_entry__add_cpumode_period(struct hist_entry *self,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		self->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		self->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		self->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		self->period_guest_us += period;
		break;
	default:
		break;
	}
}

/*
 * histogram, sorted on item, collects periods
 */

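/*
 * Allocate a hist_entry copied from @template. The callchain root is
 * carved out of trailing storage, and only when callchains are enabled.
 */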
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_node) : 0;
	struct hist_entry *self = malloc(sizeof(*self) + callchain_size);

	if (self != NULL) {
		*self = *template;
		self->nr_events = 1;
		if (symbol_conf.use_callchain)
			callchain_init(self->callchain);
	}

	return self;
}

static void hists__inc_nr_entries(struct hists *self, struct hist_entry *entry)
{
	if (entry->ms.sym && self->max_sym_namelen < entry->ms.sym->namelen)
		self->max_sym_namelen = entry->ms.sym->namelen;
	++self->nr_entries;
}

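/*
 * Add a sample at @al to the rbtree of entries: if an entry with the
 * same sort keys already exists, fold the period into it, otherwise
 * insert a new entry. Returns NULL only when allocation fails.
 */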
struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct rb_node **p = &self->entries.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.ip	= al->addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
	};
	int cmp;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			he->period += period;
			++he->nr_events;
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(&entry);
	if (!he)
		return NULL;
	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &self->entries);
	hists__inc_nr_entries(self, he);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
	return he;
}

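/*
 * Compare two entries over the configured sort keys, in order, until
 * one key differs.
 */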
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

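/*
 * Like hist_entry__cmp(), but prefer each sort key's se_collapse
 * method when it has one, falling back to se_cmp.
 */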
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

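/*
 * Insert @he into @root; if an equivalent entry already exists there,
 * merge the periods, free @he and return false.
 */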
static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->period += he->period;
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);
	return true;
}

void hists__collapse_resort(struct hists *self)
{
	struct rb_root tmp;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	tmp = RB_ROOT;
	next = rb_first(&self->entries);
	self->nr_entries = 0;
	self->max_sym_namelen = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &self->entries);
		if (collapse__insert_entry(&tmp, n))
			hists__inc_nr_entries(self, n);
	}

	self->entries = tmp;
}

/*
 * reverse the map, sort on period.
 */

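/*
 * Insert @he into the output tree in descending period order, sorting
 * its callchain first when callchains are enabled.
 */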
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->period > iter->period)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *self)
{
	struct rb_root tmp;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = self->stats.total_period * (callchain_param.min_percent / 100);

	tmp = RB_ROOT;
	next = rb_first(&self->entries);

	self->nr_entries = 0;
	self->max_sym_namelen = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &self->entries);
		__hists__insert_output_entry(&tmp, n, min_callchain_hits);
		hists__inc_nr_entries(self, n);
	}

	self->entries = tmp;
}

static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, "            ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|          ");
		else
			ret += fprintf(fp, "           ");

	ret += fprintf(fp, "\n");

	return ret;
}

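/*
 * Print one callchain graph line: a '|' pipe for each still-active
 * depth column, the branch percentage on the first entry of a branch
 * at its deepest column, then the symbol name (or the raw ip when the
 * symbol is unresolved).
 */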
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", "          ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}

static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

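/*
 * Set up the synthetic "[...]" symbol used to report hits filtered out
 * below the percentage threshold.
 */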
static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

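/*
 * Recursively print a callchain subtree in graph mode. @depth_mask
 * tracks which columns still need a '|' connector; hits that fell
 * under the threshold are summarized with the "[...]" entry.
 */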
static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 new_total;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = self->children_hit;
	else
		new_total = total_samples;

	remaining = new_total;

	node = rb_first(&self->rb_root);
	while (node) {
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth, except when there
		 * are remaining filtered hits: they will supersede the last
		 * child.
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      new_total,
						      cumul,
						      left_margin);
		}
		ret += __callchain__fprintf_graph(fp, child, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != new_total) {

		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));

		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, new_total,
					      remaining, left_margin);
	}

	return ret;
}

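/*
 * Print the top-level chain of @self (skipping the leading symbol when
 * it is already the sort key), draw the "---" connector, then recurse
 * into the children via __callchain__fprintf_graph().
 */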
static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
				       u64 total_samples, int left_margin)
{
	struct callchain_list *chain;
	bool printed = false;
	int i = 0;
	int ret = 0;
	u32 entries_printed = 0;

	list_for_each_entry(chain, &self->val, list) {
		if (!i++ && sort__first_dimension == SORT_SYM)
			continue;

		if (!printed) {
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "|\n");
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "---");

			left_margin += 3;
			printed = true;
		} else
			ret += callchain__fprintf_left_margin(fp, left_margin);

		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

	return ret;
}

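/*
 * Print a callchain branch in flat mode, walking from the root down by
 * recursing on the parent first.
 */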
static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
				      u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	ret += callchain__fprintf_flat(fp, self->parent, total_samples);

	list_for_each_entry(chain, &self->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym)
			ret += fprintf(fp, "                %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, "                %p\n",
				       (void *)(long)chain->ip);
	}

	return ret;
}

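/*
 * Print every sorted callchain branch of @self according to the
 * configured callchain mode (flat or graph).
 */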
static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
					    u64 total_samples, int left_margin)
{
	struct rb_node *rb_node;
	struct callchain_node *chain;
	size_t ret = 0;
	u32 entries_printed = 0;

	rb_node = rb_first(&self->sorted_chain);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;
		switch (callchain_param.mode) {
		case CHAIN_FLAT:
			ret += percent_color_fprintf(fp, "        %6.2f%%\n",
						     percent);
			ret += callchain__fprintf_flat(fp, chain, total_samples);
			break;
		case CHAIN_GRAPH_ABS: /* Fall through */
		case CHAIN_GRAPH_REL:
			ret += callchain__fprintf_graph(fp, chain, total_samples,
							left_margin);
			break;
		case CHAIN_NONE:
		default:
			break;
		}
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;
		rb_node = rb_next(rb_node);
	}

	return ret;
}

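/*
 * Format one histogram line into @s: the overhead percentage (or the
 * raw period when there is no total), optional sample count, cpu
 * utilization, diff/displacement columns when comparing against
 * @pair_hists, then one column per active sort key.
 */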
int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
			 struct hists *pair_hists, bool show_displacement,
			 long displacement, bool color, u64 session_total)
{
	struct sort_entry *se;
	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !self->parent)
		return 0;

	if (pair_hists) {
		period = self->pair ? self->pair->period : 0;
		total = pair_hists->stats.total_period;
		period_sys = self->pair ? self->pair->period_sys : 0;
		period_us = self->pair ? self->pair->period_us : 0;
		period_guest_sys = self->pair ? self->pair->period_guest_sys : 0;
		period_guest_us = self->pair ? self->pair->period_guest_us : 0;
	} else {
		period = self->period;
		total = session_total;
		period_sys = self->period_sys;
		period_us = self->period_us;
		period_guest_sys = self->period_guest_sys;
		period_guest_us = self->period_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : "   %6.2f%%",
						     (period * 100.0) / total);
		else
			ret = snprintf(s, size, sep ? "%.2f" : "   %6.2f%%",
				       (period * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : "   %6.2f%%",
						      (period_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : "   %6.2f%%",
						      (period_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : "   %6.2f%%",
							      (period_guest_sys * 100.0) /
							      total);
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : "   %6.2f%%",
							      (period_guest_us * 100.0) /
							      total);
			}
		}
	} else
		ret = snprintf(s, size, sep ? "%lld" : "%12lld ", period);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%lld", *sep, period);
		else
			ret += snprintf(s + ret, size - ret, "%11lld", period);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (period * 100.0) / total;
		if (session_total > 0)
			new_percent = (self->period * 100.0) / session_total;

		diff = new_percent - old_percent;

		if (fabs(diff) >= 0.01)
			snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			snprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += snprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				snprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				snprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += snprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += snprintf(s + ret, size - ret, "%s", sep ?: "  ");
		ret += se->se_snprintf(self, s + ret, size - ret,
				       se->se_width ? *se->se_width : 0);
	}

	return ret;
}

int hist_entry__fprintf(struct hist_entry *self, struct hists *pair_hists,
			bool show_displacement, long displacement, FILE *fp,
			u64 session_total)
{
	char bf[512];

	hist_entry__snprintf(self, bf, sizeof(bf), pair_hists,
			     show_displacement, displacement,
			     true, session_total);
	return fprintf(fp, "%s\n", bf);
}

static size_t hist_entry__fprintf_callchain(struct hist_entry *self, FILE *fp,
					    u64 session_total)
{
	int left_margin = 0;

	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);

		left_margin = se->se_width ? *se->se_width : 0;
		left_margin -= thread__comm_len(self->thread);
	}

	return hist_entry_callchain__fprintf(fp, self, session_total,
					     left_margin);
}

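/*
 * Print the whole histogram: the header line, the dotted separator
 * (unless a field separator is in use), then one line per entry, with
 * callchains appended when enabled.
 */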
size_t hists__fprintf(struct hists *self, struct hists *pair,
		      bool show_displacement, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	const char *col_width = symbol_conf.col_width_list_str;

	init_rem_hits();

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs("  Samples  ", fp);
	}

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, "  sys  ");
			ret += fprintf(fp, "  us  ");
			if (perf_guest) {
				ret += fprintf(fp, "  guest sys  ");
				ret += fprintf(fp, "  guest us  ");
			}
		}
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, "  Delta ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (se->se_width) {
			if (symbol_conf.col_width_list_str) {
				if (col_width) {
					*se->se_width = atoi(col_width);
					col_width = strchr(col_width, ',');
					if (col_width)
						++col_width;
				}
			}
			width = *se->se_width = max(*se->se_width, width);
		}
		fprintf(fp, "  %*s", width, se->se_header);
	}
	fprintf(fp, "\n");

	if (sep)
		goto print_entries;

	fprintf(fp, "# ........");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, "  ");
		if (se->se_width)
			width = *se->se_width;
		else
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n#\n");

print_entries:
	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (show_displacement) {
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
						(long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, pair, show_displacement,
					   displacement, fp, self->stats.total_period);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, fp, self->stats.total_period);

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(rem_sq_bracket);

	return ret;
}

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
};

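/*
 * Mark entries that don't map to @dso as filtered and recompute the
 * stats over the entries that remain visible. A NULL @dso clears the
 * DSO filter.
 */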
void hists__filter_by_dso(struct hists *self, const struct dso *dso)
{
	struct rb_node *nd;

	self->nr_entries = self->stats.total_period = 0;
	self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	self->max_sym_namelen = 0;

	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
			h->filtered |= (1 << HIST_FILTER__DSO);
			continue;
		}

		h->filtered &= ~(1 << HIST_FILTER__DSO);
		if (!h->filtered) {
			++self->nr_entries;
			self->stats.total_period += h->period;
			self->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;
			if (h->ms.sym &&
			    self->max_sym_namelen < h->ms.sym->namelen)
				self->max_sym_namelen = h->ms.sym->namelen;
		}
	}
}

void hists__filter_by_thread(struct hists *self, const struct thread *thread)
{
	struct rb_node *nd;

	self->nr_entries = self->stats.total_period = 0;
	self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	self->max_sym_namelen = 0;

	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (thread != NULL && h->thread != thread) {
			h->filtered |= (1 << HIST_FILTER__THREAD);
			continue;
		}
		h->filtered &= ~(1 << HIST_FILTER__THREAD);
		if (!h->filtered) {
			++self->nr_entries;
			self->stats.total_period += h->period;
			self->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;
			if (h->ms.sym &&
			    self->max_sym_namelen < h->ms.sym->namelen)
				self->max_sym_namelen = h->ms.sym->namelen;
		}
	}
}

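/*
 * Allocate the per-symbol annotation histogram: one u64 counter per
 * byte of the symbol.
 */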
static int symbol__alloc_hist(struct symbol *self)
{
	struct sym_priv *priv = symbol__priv(self);
	const int size = (sizeof(*priv->hist) +
			  (self->end - self->start) * sizeof(u64));

	priv->hist = zalloc(size);
	return priv->hist == NULL ? -1 : 0;
}

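/*
 * Bump the annotation counter for the address hit by this sample,
 * allocating the symbol's histogram on first use.
 */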
int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
{
	unsigned int sym_size, offset;
	struct symbol *sym = self->ms.sym;
	struct sym_priv *priv;
	struct sym_hist *h;

	if (!sym || !self->ms.map)
		return 0;

	priv = symbol__priv(sym);
	if (priv->hist == NULL && symbol__alloc_hist(sym) < 0)
		return -ENOMEM;

	sym_size = sym->end - sym->start;
	offset = ip - sym->start;

	pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));

	if (offset >= sym_size)
		return 0;

	h = priv->hist;
	h->sum++;
	h->ip[offset]++;

	pr_debug3("%#Lx %s: period++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start,
		  self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]);
	return 0;
}

static struct objdump_line *objdump_line__new(s64 offset, char *line)
{
	struct objdump_line *self = malloc(sizeof(*self));

	if (self != NULL) {
		self->offset = offset;
		self->line = line;
	}

	return self;
}

void objdump_line__free(struct objdump_line *self)
{
	free(self->line);
	free(self);
}

static void objdump__add_line(struct list_head *head, struct objdump_line *line)
{
	list_add_tail(&line->node, head);
}

struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
					       struct objdump_line *pos)
{
	list_for_each_entry_continue(pos, head, node)
		if (pos->offset >= 0)
			return pos;

	return NULL;
}

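/*
 * Read one line of objdump output: strip trailing whitespace, try to
 * parse a leading "address:" to compute the offset into the symbol
 * (-1 for non-instruction lines), and queue the line on @head.
 */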
static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
					  struct list_head *head)
{
	struct symbol *sym = self->ms.sym;
	struct objdump_line *objdump_line;
	char *line = NULL, *tmp, *tmp2, *c;
	size_t line_len;
	s64 line_ip, offset = -1;

	if (getline(&line, &line_len, file) < 0)
		return -1;

	if (!line)
		return -1;

	while (line_len != 0 && isspace(line[line_len - 1]))
		line[--line_len] = '\0';

	c = strchr(line, '\n');
	if (c)
		*c = 0;

	line_ip = -1;

	/*
	 * Strip leading spaces:
	 */
	tmp = line;
	while (*tmp) {
		if (*tmp != ' ')
			break;
		tmp++;
	}

	if (*tmp) {
		/*
		 * Parse hex addresses followed by ':'
		 */
		line_ip = strtoull(tmp, &tmp2, 16);
		if (*tmp2 != ':' || tmp == tmp2)
			line_ip = -1;
	}

	if (line_ip != -1) {
		u64 start = map__rip_2objdump(self->ms.map, sym->start);

		offset = line_ip - start;
	}

	objdump_line = objdump_line__new(offset, line);
	if (objdump_line == NULL) {
		free(line);
		return -1;
	}
	objdump__add_line(head, objdump_line);

	return 0;
}

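/*
 * Disassemble the entry's symbol by running objdump over the build-id
 * cached copy of the DSO when available, falling back to the DSO's
 * long name on disk, and collect the parsed lines on @head.
 */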
int hist_entry__annotate(struct hist_entry *self, struct list_head *head)
{
	struct symbol *sym = self->ms.sym;
	struct map *map = self->ms.map;
	struct dso *dso = map->dso;
	char *filename = dso__build_id_filename(dso, NULL, 0);
	bool free_filename = true;
	char command[PATH_MAX * 2];
	FILE *file;
	int err = 0;
	u64 len;

	if (filename == NULL) {
		if (dso->has_build_id) {
			pr_err("Can't annotate %s: not enough memory\n",
			       sym->name);
			return -ENOMEM;
		}
		goto fallback;
	} else if (readlink(filename, command, sizeof(command)) < 0 ||
		   strstr(command, "[kernel.kallsyms]") ||
		   access(filename, R_OK)) {
		free(filename);
fallback:
		/*
		 * If we don't have build-ids or the build-id file isn't in the
		 * cache, or is just a kallsyms file, well, let's hope that this
		 * DSO is the same as when 'perf record' ran.
		 */
		filename = dso->long_name;
		free_filename = false;
	}

	if (dso->origin == DSO__ORIG_KERNEL) {
		if (dso->annotate_warned)
			goto out_free_filename;
		err = -ENOENT;
		dso->annotate_warned = 1;
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path\n", sym->name);
		goto out_free_filename;
	}

	pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
		 filename, sym->name, map->unmap_ip(map, sym->start),
		 map->unmap_ip(map, sym->end));

	len = sym->end - sym->start;

	pr_debug("annotating [%p] %30s : [%p] %30s\n",
		 dso, dso->long_name, sym, sym->name);

	snprintf(command, sizeof(command),
		 "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s|expand",
		 map__rip_2objdump(map, sym->start),
		 map__rip_2objdump(map, sym->end),
		 filename, filename);

	pr_debug("Executing: %s\n", command);

	file = popen(command, "r");
	if (!file)
		goto out_free_filename;

	while (!feof(file))
		if (hist_entry__parse_objdump_line(self, file, head) < 0)
			break;

	pclose(file);
out_free_filename:
	if (free_filename)
		free(filename);
	return err;
}

void hists__inc_nr_events(struct hists *self, u32 type)
{
	++self->stats.nr_events[0];
	++self->stats.nr_events[type];
}

size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		if (!event__name[i])
			continue;
		ret += fprintf(fp, "%10s events: %10d\n",
			       event__name[i], self->stats.nr_events[i]);
	}

	return ret;
}