/*
 * tools/perf/util/hist.c
 * (as of the merge of v2.6.35 into perf/core)
 */
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

enum hist_filter {
        HIST_FILTER__DSO,
        HIST_FILTER__THREAD,
        HIST_FILTER__PARENT,
};

struct callchain_param callchain_param = {
        .mode = CHAIN_GRAPH_REL,
        .min_percent = 0.5
};

u16 hists__col_len(struct hists *self, enum hist_column col)
{
        return self->col_len[col];
}

void hists__set_col_len(struct hists *self, enum hist_column col, u16 len)
{
        self->col_len[col] = len;
}

bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len)
{
        if (len > hists__col_len(self, col)) {
                hists__set_col_len(self, col, len);
                return true;
        }
        return false;
}

static void hists__reset_col_len(struct hists *self)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(self, col, 0);
}

static void hists__calc_col_len(struct hists *self, struct hist_entry *h)
{
        u16 len;

        if (h->ms.sym)
                hists__new_col_len(self, HISTC_SYMBOL, h->ms.sym->namelen);

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(self, HISTC_COMM, len))
                hists__set_col_len(self, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(self, HISTC_DSO, len);
        }
}

static void hist_entry__add_cpumode_period(struct hist_entry *self,
                                           unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                self->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                self->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                self->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                self->period_guest_us += period;
                break;
        default:
                break;
        }
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
        size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_node) : 0;
        struct hist_entry *self = malloc(sizeof(*self) + callchain_size);

        if (self != NULL) {
                *self = *template;
                self->nr_events = 1;
                if (symbol_conf.use_callchain)
                        callchain_init(self->callchain);
        }

        return self;
}

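/*
 * Note: the callchain member is declared as a zero-length tail array of
 * struct hist_entry (see sort.h), so when callchains are enabled the
 * callchain_node co-allocated above lives right past the end of the
 * entry and self->callchain points into that extra space.
 */
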
static void hists__inc_nr_entries(struct hists *self, struct hist_entry *h)
{
        if (!h->filtered) {
                hists__calc_col_len(self, h);
                ++self->nr_entries;
        }
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

struct hist_entry *__hists__add_entry(struct hists *self,
                                      struct addr_location *al,
                                      struct symbol *sym_parent, u64 period)
{
        struct rb_node **p = &self->entries.rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .cpu = al->cpu,
                .ip = al->addr,
                .level = al->level,
                .period = period,
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
        };
        int cmp;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node);

                cmp = hist_entry__cmp(&entry, he);

                if (!cmp) {
                        he->period += period;
                        ++he->nr_events;
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(&entry);
        if (!he)
                return NULL;
        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, &self->entries);
        hists__inc_nr_entries(self, he);
out:
        hist_entry__add_cpumode_period(he, al->cpumode, period);
        return he;
}

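/*
 * Typical usage (a sketch, not code from this file): a sample processor
 * resolves an event to an addr_location and then does something like
 *
 *      struct hist_entry *he;
 *
 *      he = __hists__add_entry(hists, &al, parent_sym, sample_period);
 *      if (he == NULL)
 *              return -ENOMEM;
 *
 * Entries that compare equal under hist_entry__cmp() are merged: the
 * existing node's period and nr_events are bumped instead of a new
 * rbtree node being inserted.
 */
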
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                cmp = se->se_cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int64_t (*f)(struct hist_entry *, struct hist_entry *);

                f = se->se_collapse ?: se->se_cmp;

                cmp = f(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

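/*
 * The "?:" above is the GNU elvis extension, short for
 * se->se_collapse ? se->se_collapse : se->se_cmp: a sort key only
 * provides a separate collapse function when merging entries needs a
 * different notion of equality than ordering them does.
 */
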
void hist_entry__free(struct hist_entry *he)
{
        free(he);
}

/*
 * collapse the histogram
 */

static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        iter->period += he->period;
                        hist_entry__free(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, root);
        return true;
}

void hists__collapse_resort(struct hists *self)
{
        struct rb_root tmp;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse)
                return;

        tmp = RB_ROOT;
        next = rb_first(&self->entries);
        self->nr_entries = 0;
        hists__reset_col_len(self);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                rb_erase(&n->rb_node, &self->entries);
                if (collapse__insert_entry(&tmp, n))
                        hists__inc_nr_entries(self, n);
        }

        self->entries = tmp;
}

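/*
 * Illustrative example of what collapsing buys us: when sorting by
 * comm, samples from two threads of the same program land in distinct
 * entries during collection (se_cmp compares threads), but compare
 * equal under se_collapse (which compares the comm strings), so their
 * periods are folded into a single row here.
 */
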
/*
 * reverse the map, sort on period.
 */

static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (symbol_conf.use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (he->period > iter->period)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *self)
{
        struct rb_root tmp;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = self->stats.total_period * (callchain_param.min_percent / 100);

        tmp = RB_ROOT;
        next = rb_first(&self->entries);

        self->nr_entries = 0;
        hists__reset_col_len(self);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                rb_erase(&n->rb_node, &self->entries);
                __hists__insert_output_entry(&tmp, n, min_callchain_hits);
                hists__inc_nr_entries(self, n);
        }

        self->entries = tmp;
}

static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
        int i;
        int ret = fprintf(fp, "            ");

        for (i = 0; i < left_margin; i++)
                ret += fprintf(fp, " ");

        return ret;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
                                          int left_margin)
{
        int i;
        size_t ret = callchain__fprintf_left_margin(fp, left_margin);

        for (i = 0; i < depth; i++)
                if (depth_mask & (1 << i))
                        ret += fprintf(fp, "|          ");
                else
                        ret += fprintf(fp, "           ");

        ret += fprintf(fp, "\n");

        return ret;
}

static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
                                     int depth, int depth_mask, int period,
                                     u64 total_samples, int hits,
                                     int left_margin)
{
        int i;
        size_t ret = 0;

        ret += callchain__fprintf_left_margin(fp, left_margin);
        for (i = 0; i < depth; i++) {
                if (depth_mask & (1 << i))
                        ret += fprintf(fp, "|");
                else
                        ret += fprintf(fp, " ");
                if (!period && i == depth - 1) {
                        double percent;

                        percent = hits * 100.0 / total_samples;
                        ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
                } else
                        ret += fprintf(fp, "%s", "          ");
        }
        if (chain->ms.sym)
                ret += fprintf(fp, "%s\n", chain->ms.sym->name);
        else
                ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

        return ret;
}

static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
        rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
        if (!rem_sq_bracket) {
                fprintf(stderr, "Not enough memory to display remaining hits\n");
                return;
        }

        strcpy(rem_sq_bracket->name, "[...]");
        rem_hits.ms.sym = rem_sq_bracket;
}

static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
                                         u64 total_samples, int depth,
                                         int depth_mask, int left_margin)
{
        struct rb_node *node, *next;
        struct callchain_node *child;
        struct callchain_list *chain;
        int new_depth_mask = depth_mask;
        u64 new_total;
        u64 remaining;
        size_t ret = 0;
        int i;
        uint entries_printed = 0;

        if (callchain_param.mode == CHAIN_GRAPH_REL)
                new_total = self->children_hit;
        else
                new_total = total_samples;

        remaining = new_total;

        node = rb_first(&self->rb_root);
        while (node) {
                u64 cumul;

                child = rb_entry(node, struct callchain_node, rb_node);
                cumul = cumul_hits(child);
                remaining -= cumul;

                /*
                 * The depth mask manages the output of the pipes that
                 * draw the tree depth: the last child at a given depth
                 * must not keep the pipe for its own level open, unless
                 * remaining filtered hits are left over, in which case
                 * the "[...]" entry below supersedes the last child.
                 */
                next = rb_next(node);
                if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
                        new_depth_mask &= ~(1 << (depth - 1));

                /*
                 * The separator line, however, still uses the old depth
                 * mask, so the level link is kept intact up to and
                 * including the last child.
                 */
                ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
                                                   left_margin);
                i = 0;
                list_for_each_entry(chain, &child->val, list) {
                        ret += ipchain__fprintf_graph(fp, chain, depth,
                                                      new_depth_mask, i++,
                                                      new_total,
                                                      cumul,
                                                      left_margin);
                }
                ret += __callchain__fprintf_graph(fp, child, new_total,
                                                  depth + 1,
                                                  new_depth_mask | (1 << depth),
                                                  left_margin);
                node = next;
                if (++entries_printed == callchain_param.print_limit)
                        break;
        }

        if (callchain_param.mode == CHAIN_GRAPH_REL &&
            remaining && remaining != new_total) {

                if (!rem_sq_bracket)
                        return ret;

                new_depth_mask &= ~(1 << (depth - 1));

                ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
                                              new_depth_mask, 0, new_total,
                                              remaining, left_margin);
        }

        return ret;
}

static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
                                       u64 total_samples, int left_margin)
{
        struct callchain_list *chain;
        bool printed = false;
        int i = 0;
        int ret = 0;
        u32 entries_printed = 0;

        list_for_each_entry(chain, &self->val, list) {
                if (!i++ && sort__first_dimension == SORT_SYM)
                        continue;

                if (!printed) {
                        ret += callchain__fprintf_left_margin(fp, left_margin);
                        ret += fprintf(fp, "|\n");
                        ret += callchain__fprintf_left_margin(fp, left_margin);
                        ret += fprintf(fp, "---");

                        left_margin += 3;
                        printed = true;
                } else
                        ret += callchain__fprintf_left_margin(fp, left_margin);

                if (chain->ms.sym)
                        ret += fprintf(fp, " %s\n", chain->ms.sym->name);
                else
                        ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

                if (++entries_printed == callchain_param.print_limit)
                        break;
        }

        ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

        return ret;
}

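/*
 * Taken together, the helpers above render the familiar perf-report
 * callchain tree, e.g. (illustrative output, made-up symbols):
 *
 *             |
 *             ---main
 *                |
 *                |--60.00%-- do_work
 *                |          compute
 *                |
 *                 --40.00%-- [...]
 */
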
static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
                                      u64 total_samples)
{
        struct callchain_list *chain;
        size_t ret = 0;

        if (!self)
                return 0;

        ret += callchain__fprintf_flat(fp, self->parent, total_samples);

        list_for_each_entry(chain, &self->val, list) {
                if (chain->ip >= PERF_CONTEXT_MAX)
                        continue;
                if (chain->ms.sym)
                        ret += fprintf(fp, "                %s\n", chain->ms.sym->name);
                else
                        ret += fprintf(fp, "                %p\n",
                                       (void *)(long)chain->ip);
        }

        return ret;
}

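/*
 * Flat mode recurses up to the root first, so each chain is printed
 * top-down under its percentage, e.g. (illustrative):
 *
 *         12.50%
 *                 main
 *                 do_work
 *                 compute
 */
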
static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
                                            u64 total_samples, int left_margin)
{
        struct rb_node *rb_node;
        struct callchain_node *chain;
        size_t ret = 0;
        u32 entries_printed = 0;

        rb_node = rb_first(&self->sorted_chain);
        while (rb_node) {
                double percent;

                chain = rb_entry(rb_node, struct callchain_node, rb_node);
                percent = chain->hit * 100.0 / total_samples;
                switch (callchain_param.mode) {
                case CHAIN_FLAT:
                        ret += percent_color_fprintf(fp, "        %6.2f%%\n",
                                                     percent);
                        ret += callchain__fprintf_flat(fp, chain, total_samples);
                        break;
                case CHAIN_GRAPH_ABS: /* fall through */
                case CHAIN_GRAPH_REL:
                        ret += callchain__fprintf_graph(fp, chain, total_samples,
                                                        left_margin);
                        break;
                case CHAIN_NONE:
                default:
                        break;
                }
                ret += fprintf(fp, "\n");
                if (++entries_printed == callchain_param.print_limit)
                        break;
                rb_node = rb_next(rb_node);
        }

        return ret;
}

int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
                         struct hists *hists, struct hists *pair_hists,
                         bool show_displacement, long displacement,
                         bool color, u64 session_total)
{
        struct sort_entry *se;
        u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
        const char *sep = symbol_conf.field_sep;
        int ret;

        if (symbol_conf.exclude_other && !self->parent)
                return 0;

        if (pair_hists) {
                period = self->pair ? self->pair->period : 0;
                total = pair_hists->stats.total_period;
                period_sys = self->pair ? self->pair->period_sys : 0;
                period_us = self->pair ? self->pair->period_us : 0;
                period_guest_sys = self->pair ? self->pair->period_guest_sys : 0;
                period_guest_us = self->pair ? self->pair->period_guest_us : 0;
        } else {
                period = self->period;
                total = session_total;
                period_sys = self->period_sys;
                period_us = self->period_us;
                period_guest_sys = self->period_guest_sys;
                period_guest_us = self->period_guest_us;
        }

        if (total) {
                if (color)
                        ret = percent_color_snprintf(s, size,
                                                     sep ? "%.2f" : "   %6.2f%%",
                                                     (period * 100.0) / total);
                else
                        ret = snprintf(s, size, sep ? "%.2f" : "   %6.2f%%",
                                       (period * 100.0) / total);
                if (symbol_conf.show_cpu_utilization) {
                        ret += percent_color_snprintf(s + ret, size - ret,
                                        sep ? "%.2f" : "   %6.2f%%",
                                        (period_sys * 100.0) / total);
                        ret += percent_color_snprintf(s + ret, size - ret,
                                        sep ? "%.2f" : "   %6.2f%%",
                                        (period_us * 100.0) / total);
                        if (perf_guest) {
                                ret += percent_color_snprintf(s + ret,
                                                size - ret,
                                                sep ? "%.2f" : "   %6.2f%%",
                                                (period_guest_sys * 100.0) /
                                                                total);
                                ret += percent_color_snprintf(s + ret,
                                                size - ret,
                                                sep ? "%.2f" : "   %6.2f%%",
                                                (period_guest_us * 100.0) /
                                                                total);
                        }
                }
        } else
                ret = snprintf(s, size, sep ? "%lld" : "%12lld ", period);

        if (symbol_conf.show_nr_samples) {
                if (sep)
                        ret += snprintf(s + ret, size - ret, "%c%lld", *sep, period);
                else
                        ret += snprintf(s + ret, size - ret, "%11lld", period);
        }

        if (pair_hists) {
                char bf[32];
                double old_percent = 0, new_percent = 0, diff;

                if (total > 0)
                        old_percent = (period * 100.0) / total;
                if (session_total > 0)
                        new_percent = (self->period * 100.0) / session_total;

                diff = new_percent - old_percent;

                if (fabs(diff) >= 0.01)
                        snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
                else
                        snprintf(bf, sizeof(bf), " ");

                if (sep)
                        ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
                else
                        ret += snprintf(s + ret, size - ret, "%11.11s", bf);

                if (show_displacement) {
                        if (displacement)
                                snprintf(bf, sizeof(bf), "%+4ld", displacement);
                        else
                                snprintf(bf, sizeof(bf), " ");

                        if (sep)
                                ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
                        else
                                ret += snprintf(s + ret, size - ret, "%6.6s", bf);
                }
        }

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                if (se->elide)
                        continue;

                ret += snprintf(s + ret, size - ret, "%s", sep ?: "  ");
                ret += se->se_snprintf(self, s + ret, size - ret,
                                       hists__col_len(hists, se->se_width_idx));
        }

        return ret;
}

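/*
 * With the default sort keys and no field separator, the buffer built
 * above renders as a report line like (illustrative):
 *
 *      4.28%     perf  /usr/bin/perf      [.] hists__output_resort
 *
 * i.e. the percentage block first, then one two-space-separated column
 * per non-elided sort_entry.
 */
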
int hist_entry__fprintf(struct hist_entry *self, struct hists *hists,
                        struct hists *pair_hists, bool show_displacement,
                        long displacement, FILE *fp, u64 session_total)
{
        char bf[512];
        hist_entry__snprintf(self, bf, sizeof(bf), hists, pair_hists,
                             show_displacement, displacement,
                             true, session_total);
        return fprintf(fp, "%s\n", bf);
}

static size_t hist_entry__fprintf_callchain(struct hist_entry *self,
                                            struct hists *hists, FILE *fp,
                                            u64 session_total)
{
        int left_margin = 0;

        if (sort__first_dimension == SORT_COMM) {
                struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
                                                         typeof(*se), list);
                left_margin = hists__col_len(hists, se->se_width_idx);
                left_margin -= thread__comm_len(self->thread);
        }

        return hist_entry_callchain__fprintf(fp, self, session_total,
                                             left_margin);
}

size_t hists__fprintf(struct hists *self, struct hists *pair,
                      bool show_displacement, FILE *fp)
{
        struct sort_entry *se;
        struct rb_node *nd;
        size_t ret = 0;
        unsigned long position = 1;
        long displacement = 0;
        unsigned int width;
        const char *sep = symbol_conf.field_sep;
        const char *col_width = symbol_conf.col_width_list_str;

        init_rem_hits();

        fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

        if (symbol_conf.show_nr_samples) {
                if (sep)
                        fprintf(fp, "%cSamples", *sep);
                else
                        fputs("  Samples  ", fp);
        }

        if (symbol_conf.show_cpu_utilization) {
                if (sep) {
                        ret += fprintf(fp, "%csys", *sep);
                        ret += fprintf(fp, "%cus", *sep);
                        if (perf_guest) {
                                ret += fprintf(fp, "%cguest sys", *sep);
                                ret += fprintf(fp, "%cguest us", *sep);
                        }
                } else {
                        ret += fprintf(fp, "  sys  ");
                        ret += fprintf(fp, "  us  ");
                        if (perf_guest) {
                                ret += fprintf(fp, "  guest sys  ");
                                ret += fprintf(fp, "  guest us  ");
                        }
                }
        }

        if (pair) {
                if (sep)
                        ret += fprintf(fp, "%cDelta", *sep);
                else
                        ret += fprintf(fp, "  Delta ");

                if (show_displacement) {
                        if (sep)
                                ret += fprintf(fp, "%cDisplacement", *sep);
                        else
                                ret += fprintf(fp, " Displ");
                }
        }

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                if (se->elide)
                        continue;
                if (sep) {
                        fprintf(fp, "%c%s", *sep, se->se_header);
                        continue;
                }
                width = strlen(se->se_header);
                if (symbol_conf.col_width_list_str) {
                        if (col_width) {
                                hists__set_col_len(self, se->se_width_idx,
                                                   atoi(col_width));
                                col_width = strchr(col_width, ',');
                                if (col_width)
                                        ++col_width;
                        }
                }
                if (!hists__new_col_len(self, se->se_width_idx, width))
                        width = hists__col_len(self, se->se_width_idx);
                fprintf(fp, "  %*s", width, se->se_header);
        }
        fprintf(fp, "\n");

        if (sep)
                goto print_entries;

        fprintf(fp, "# ........");
        if (symbol_conf.show_nr_samples)
                fprintf(fp, " ..........");
        if (pair) {
                fprintf(fp, " ..........");
                if (show_displacement)
                        fprintf(fp, " .....");
        }
        list_for_each_entry(se, &hist_entry__sort_list, list) {
                unsigned int i;

                if (se->elide)
                        continue;

                fprintf(fp, "  ");
                width = hists__col_len(self, se->se_width_idx);
                if (width == 0)
                        width = strlen(se->se_header);
                for (i = 0; i < width; i++)
                        fprintf(fp, ".");
        }

        fprintf(fp, "\n#\n");

print_entries:
        for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (show_displacement) {
                        if (h->pair != NULL)
                                displacement = ((long)h->pair->position -
                                                (long)position);
                        else
                                displacement = 0;
                        ++position;
                }
                ret += hist_entry__fprintf(h, self, pair, show_displacement,
                                           displacement, fp, self->stats.total_period);

                if (symbol_conf.use_callchain)
                        ret += hist_entry__fprintf_callchain(h, self, fp,
                                                             self->stats.total_period);
                if (h->ms.map == NULL && verbose > 1) {
                        __map_groups__fprintf_maps(&h->thread->mg,
                                                   MAP__FUNCTION, verbose, fp);
                        fprintf(fp, "%.10s end\n", graph_dotted_line);
                }
        }

        free(rem_sq_bracket);

        return ret;
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *self)
{
        struct sort_entry *se;
        int ret = 9; /* total % */

        if (symbol_conf.show_cpu_utilization) {
                ret += 7; /* count_sys % */
                ret += 6; /* count_us % */
                if (perf_guest) {
                        ret += 13; /* count_guest_sys % */
                        ret += 12; /* count_guest_us % */
                }
        }

        if (symbol_conf.show_nr_samples)
                ret += 11;

        list_for_each_entry(se, &hist_entry__sort_list, list)
                if (!se->elide)
                        ret += 2 + hists__col_len(self, se->se_width_idx);

        return ret;
}

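/*
 * h->filtered is a bitmask with one bit per enum hist_filter. An entry
 * is visible only while all bits are clear: the filter functions below
 * OR their bit in, and this helper clears one bit and re-accounts the
 * entry once no filter claims it anymore.
 */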
static void hists__remove_entry_filter(struct hists *self, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        ++self->nr_entries;
        if (h->ms.unfolded)
                self->nr_entries += h->nr_rows;
        h->row_offset = 0;
        self->stats.total_period += h->period;
        self->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

        hists__calc_col_len(self, h);
}

void hists__filter_by_dso(struct hists *self, const struct dso *dso)
{
        struct rb_node *nd;

        self->nr_entries = self->stats.total_period = 0;
        self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(self);

        for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
                        h->filtered |= (1 << HIST_FILTER__DSO);
                        continue;
                }

                hists__remove_entry_filter(self, h, HIST_FILTER__DSO);
        }
}

void hists__filter_by_thread(struct hists *self, const struct thread *thread)
{
        struct rb_node *nd;

        self->nr_entries = self->stats.total_period = 0;
        self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(self);

        for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (thread != NULL && h->thread != thread) {
                        h->filtered |= (1 << HIST_FILTER__THREAD);
                        continue;
                }

                hists__remove_entry_filter(self, h, HIST_FILTER__THREAD);
        }
}

static int symbol__alloc_hist(struct symbol *self)
{
        struct sym_priv *priv = symbol__priv(self);
        const int size = (sizeof(*priv->hist) +
                          (self->end - self->start) * sizeof(u64));

        priv->hist = zalloc(size);
        return priv->hist == NULL ? -1 : 0;
}

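/*
 * The annotation histogram allocated lazily by symbol__alloc_hist()
 * keeps one u64 hit counter per byte of the symbol, indexed by
 * ip - sym->start below.
 */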
int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
{
        unsigned int sym_size, offset;
        struct symbol *sym = self->ms.sym;
        struct sym_priv *priv;
        struct sym_hist *h;

        if (!sym || !self->ms.map)
                return 0;

        priv = symbol__priv(sym);
        if (priv->hist == NULL && symbol__alloc_hist(sym) < 0)
                return -ENOMEM;

        sym_size = sym->end - sym->start;
        offset = ip - sym->start;

        pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));

        if (offset >= sym_size)
                return 0;

        h = priv->hist;
        h->sum++;
        h->ip[offset]++;

        pr_debug3("%#Lx %s: period++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start,
                  self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]);
        return 0;
}

static struct objdump_line *objdump_line__new(s64 offset, char *line)
{
        struct objdump_line *self = malloc(sizeof(*self));

        if (self != NULL) {
                self->offset = offset;
                self->line = line;
        }

        return self;
}

void objdump_line__free(struct objdump_line *self)
{
        free(self->line);
        free(self);
}

static void objdump__add_line(struct list_head *head, struct objdump_line *line)
{
        list_add_tail(&line->node, head);
}

struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
                                               struct objdump_line *pos)
{
        list_for_each_entry_continue(pos, head, node)
                if (pos->offset >= 0)
                        return pos;

        return NULL;
}

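/*
 * The parser below consumes one line of objdump output at a time.
 * Disassembly lines start with a hex address followed by ':', e.g.
 * (illustrative):
 *
 *   ffffffff810483a0:       55      push   %rbp
 *
 * and get an offset relative to the symbol start; source lines and
 * labels produced by -dS fail the address check and keep offset == -1.
 */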
static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
                                          struct list_head *head)
{
        struct symbol *sym = self->ms.sym;
        struct objdump_line *objdump_line;
        char *line = NULL, *tmp, *tmp2, *c;
        size_t line_len;
        s64 line_ip, offset = -1;

        if (getline(&line, &line_len, file) < 0)
                return -1;

        if (!line)
                return -1;

        while (line_len != 0 && isspace(line[line_len - 1]))
                line[--line_len] = '\0';

        c = strchr(line, '\n');
        if (c)
                *c = 0;

        line_ip = -1;

        /*
         * Strip leading spaces:
         */
        tmp = line;
        while (*tmp) {
                if (*tmp != ' ')
                        break;
                tmp++;
        }

        if (*tmp) {
                /*
                 * Parse hex addresses followed by ':'
                 */
                line_ip = strtoull(tmp, &tmp2, 16);
                if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
                        line_ip = -1;
        }

        if (line_ip != -1) {
                u64 start = map__rip_2objdump(self->ms.map, sym->start),
                    end = map__rip_2objdump(self->ms.map, sym->end);

                offset = line_ip - start;
                if (offset < 0 || (u64)line_ip > end)
                        offset = -1;
        }

        objdump_line = objdump_line__new(offset, line);
        if (objdump_line == NULL) {
                free(line);
                return -1;
        }
        objdump__add_line(head, objdump_line);

        return 0;
}

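/*
 * hist_entry__annotate() shells out to objdump; the command built below
 * expands to something like (illustrative paths):
 *
 *   objdump --start-address=0x... --stop-address=0x... -dS -C \
 *       /usr/lib/libfoo.so | grep -v /usr/lib/libfoo.so | expand
 *
 * The grep -v strips objdump's header lines, which repeat the file
 * name.
 */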
int hist_entry__annotate(struct hist_entry *self, struct list_head *head)
{
        struct symbol *sym = self->ms.sym;
        struct map *map = self->ms.map;
        struct dso *dso = map->dso;
        char *filename = dso__build_id_filename(dso, NULL, 0);
        bool free_filename = true;
        char command[PATH_MAX * 2];
        FILE *file;
        int err = 0;
        u64 len;

        if (filename == NULL) {
                if (dso->has_build_id) {
                        pr_err("Can't annotate %s: not enough memory\n",
                               sym->name);
                        return -ENOMEM;
                }
                goto fallback;
        } else if (readlink(filename, command, sizeof(command)) < 0 ||
                   strstr(command, "[kernel.kallsyms]") ||
                   access(filename, R_OK)) {
                free(filename);
fallback:
                /*
                 * If we don't have build-ids, or the build-id file isn't in
                 * the cache or is just a kallsyms file, let's hope that this
                 * DSO is the same as when 'perf record' ran.
                 */
                filename = dso->long_name;
                free_filename = false;
        }

        if (dso->origin == DSO__ORIG_KERNEL) {
                if (dso->annotate_warned)
                        goto out_free_filename;
                err = -ENOENT;
                dso->annotate_warned = 1;
                pr_err("Can't annotate %s: No vmlinux file was found in the "
                       "path\n", sym->name);
                goto out_free_filename;
        }

        pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
                 filename, sym->name, map->unmap_ip(map, sym->start),
                 map->unmap_ip(map, sym->end));

        len = sym->end - sym->start;

        pr_debug("annotating [%p] %30s : [%p] %30s\n",
                 dso, dso->long_name, sym, sym->name);

        snprintf(command, sizeof(command),
                 "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS -C %s|grep -v %s|expand",
                 map__rip_2objdump(map, sym->start),
                 map__rip_2objdump(map, sym->end),
                 filename, filename);

        pr_debug("Executing: %s\n", command);

        file = popen(command, "r");
        if (!file)
                goto out_free_filename;

        while (!feof(file))
                if (hist_entry__parse_objdump_line(self, file, head) < 0)
                        break;

        pclose(file);
out_free_filename:
        if (free_filename)
                free(filename);
        return err;
}

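/* nr_events[0] doubles as the running total across all event types */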
void hists__inc_nr_events(struct hists *self, u32 type)
{
        ++self->stats.nr_events[0];
        ++self->stats.nr_events[type];
}

size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
{
        int i;
        size_t ret = 0;

        for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
                if (!event__name[i])
                        continue;
                ret += fprintf(fp, "%10s events: %10d\n",
                               event__name[i], self->stats.nr_events[i]);
        }

        return ret;
}