perf tools: Add 'cgroup_id' sort order keyword
tools/perf/util/hist.c
1 #include "util.h"
2 #include "build-id.h"
3 #include "hist.h"
4 #include "map.h"
5 #include "session.h"
6 #include "namespaces.h"
7 #include "sort.h"
8 #include "evlist.h"
9 #include "evsel.h"
10 #include "annotate.h"
11 #include "ui/progress.h"
12 #include <math.h>
13
14 static bool hists__filter_entry_by_dso(struct hists *hists,
15 struct hist_entry *he);
16 static bool hists__filter_entry_by_thread(struct hists *hists,
17 struct hist_entry *he);
18 static bool hists__filter_entry_by_symbol(struct hists *hists,
19 struct hist_entry *he);
20 static bool hists__filter_entry_by_socket(struct hists *hists,
21 struct hist_entry *he);
22
23 u16 hists__col_len(struct hists *hists, enum hist_column col)
24 {
25 return hists->col_len[col];
26 }
27
28 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
29 {
30 hists->col_len[col] = len;
31 }
32
33 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
34 {
35 if (len > hists__col_len(hists, col)) {
36 hists__set_col_len(hists, col, len);
37 return true;
38 }
39 return false;
40 }
41
42 void hists__reset_col_len(struct hists *hists)
43 {
44 enum hist_column col;
45
46 for (col = 0; col < HISTC_NR_COLS; ++col)
47 hists__set_col_len(hists, col, 0);
48 }
49
50 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
51 {
52 const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
53
54 if (hists__col_len(hists, dso) < unresolved_col_width &&
55 !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
56 !symbol_conf.dso_list)
57 hists__set_col_len(hists, dso, unresolved_col_width);
58 }
59
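/*
 * Update the per-column maximum widths from a single entry so the output
 * columns can later be aligned.  Unresolved addresses get a fixed width of
 * BITS_PER_LONG / 4 hex digits plus decoration.
 */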
60 void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
61 {
62 const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
63 int symlen;
64 u16 len;
65
66 /*
67 * +4 accounts for '[x] ' priv level info
68 * +2 accounts for 0x prefix on raw addresses
69 * +3 accounts for ' y ' symtab origin info
70 */
71 if (h->ms.sym) {
72 symlen = h->ms.sym->namelen + 4;
73 if (verbose > 0)
74 symlen += BITS_PER_LONG / 4 + 2 + 3;
75 hists__new_col_len(hists, HISTC_SYMBOL, symlen);
76 } else {
77 symlen = unresolved_col_width + 4 + 2;
78 hists__new_col_len(hists, HISTC_SYMBOL, symlen);
79 hists__set_unres_dso_col_len(hists, HISTC_DSO);
80 }
81
82 len = thread__comm_len(h->thread);
83 if (hists__new_col_len(hists, HISTC_COMM, len))
84 hists__set_col_len(hists, HISTC_THREAD, len + 8);
85
86 if (h->ms.map) {
87 len = dso__name_len(h->ms.map->dso);
88 hists__new_col_len(hists, HISTC_DSO, len);
89 }
90
91 if (h->parent)
92 hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
93
94 if (h->branch_info) {
95 if (h->branch_info->from.sym) {
96 symlen = (int)h->branch_info->from.sym->namelen + 4;
97 if (verbose > 0)
98 symlen += BITS_PER_LONG / 4 + 2 + 3;
99 hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
100
101 symlen = dso__name_len(h->branch_info->from.map->dso);
102 hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
103 } else {
104 symlen = unresolved_col_width + 4 + 2;
105 hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
106 hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
107 }
108
109 if (h->branch_info->to.sym) {
110 symlen = (int)h->branch_info->to.sym->namelen + 4;
111 if (verbose > 0)
112 symlen += BITS_PER_LONG / 4 + 2 + 3;
113 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
114
115 symlen = dso__name_len(h->branch_info->to.map->dso);
116 hists__new_col_len(hists, HISTC_DSO_TO, symlen);
117 } else {
118 symlen = unresolved_col_width + 4 + 2;
119 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
120 hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
121 }
122
123 if (h->branch_info->srcline_from)
124 hists__new_col_len(hists, HISTC_SRCLINE_FROM,
125 strlen(h->branch_info->srcline_from));
126 if (h->branch_info->srcline_to)
127 hists__new_col_len(hists, HISTC_SRCLINE_TO,
128 strlen(h->branch_info->srcline_to));
129 }
130
131 if (h->mem_info) {
132 if (h->mem_info->daddr.sym) {
133 symlen = (int)h->mem_info->daddr.sym->namelen + 4
134 + unresolved_col_width + 2;
135 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
136 symlen);
137 hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
138 symlen + 1);
139 } else {
140 symlen = unresolved_col_width + 4 + 2;
141 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
142 symlen);
143 hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
144 symlen);
145 }
146
147 if (h->mem_info->iaddr.sym) {
148 symlen = (int)h->mem_info->iaddr.sym->namelen + 4
149 + unresolved_col_width + 2;
150 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
151 symlen);
152 } else {
153 symlen = unresolved_col_width + 4 + 2;
154 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
155 symlen);
156 }
157
158 if (h->mem_info->daddr.map) {
159 symlen = dso__name_len(h->mem_info->daddr.map->dso);
160 hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
161 symlen);
162 } else {
163 symlen = unresolved_col_width + 4 + 2;
164 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
165 }
166 } else {
167 symlen = unresolved_col_width + 4 + 2;
168 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
169 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
170 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
171 }
172
173 hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
174 hists__new_col_len(hists, HISTC_CPU, 3);
175 hists__new_col_len(hists, HISTC_SOCKET, 6);
176 hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
177 hists__new_col_len(hists, HISTC_MEM_TLB, 22);
178 hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
179 hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
180 hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
181 hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
182
183 if (h->srcline) {
184 len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
185 hists__new_col_len(hists, HISTC_SRCLINE, len);
186 }
187
188 if (h->srcfile)
189 hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
190
191 if (h->transaction)
192 hists__new_col_len(hists, HISTC_TRANSACTION,
193 hist_entry__transaction_len());
194
195 if (h->trace_output)
196 hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
197 }
198
199 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
200 {
201 struct rb_node *next = rb_first(&hists->entries);
202 struct hist_entry *n;
203 int row = 0;
204
205 hists__reset_col_len(hists);
206
207 while (next && row++ < max_rows) {
208 n = rb_entry(next, struct hist_entry, rb_node);
209 if (!n->filtered)
210 hists__calc_col_len(hists, n);
211 next = rb_next(&n->rb_node);
212 }
213 }
214
215 static void he_stat__add_cpumode_period(struct he_stat *he_stat,
216 unsigned int cpumode, u64 period)
217 {
218 switch (cpumode) {
219 case PERF_RECORD_MISC_KERNEL:
220 he_stat->period_sys += period;
221 break;
222 case PERF_RECORD_MISC_USER:
223 he_stat->period_us += period;
224 break;
225 case PERF_RECORD_MISC_GUEST_KERNEL:
226 he_stat->period_guest_sys += period;
227 break;
228 case PERF_RECORD_MISC_GUEST_USER:
229 he_stat->period_guest_us += period;
230 break;
231 default:
232 break;
233 }
234 }
235
236 static void he_stat__add_period(struct he_stat *he_stat, u64 period,
237 u64 weight)
238 {
239
240 he_stat->period += period;
241 he_stat->weight += weight;
242 he_stat->nr_events += 1;
243 }
244
245 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
246 {
247 dest->period += src->period;
248 dest->period_sys += src->period_sys;
249 dest->period_us += src->period_us;
250 dest->period_guest_sys += src->period_guest_sys;
251 dest->period_guest_us += src->period_guest_us;
252 dest->nr_events += src->nr_events;
253 dest->weight += src->weight;
254 }
255
256 static void he_stat__decay(struct he_stat *he_stat)
257 {
258 he_stat->period = (he_stat->period * 7) / 8;
259 he_stat->nr_events = (he_stat->nr_events * 7) / 8;
260 /* XXX need decay for weight too? */
261 }
262
263 static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
264
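/*
 * Age an entry for 'perf top' style displays: he_stat__decay() scales the
 * period by 7/8, and hierarchy children are decayed recursively.  Returns
 * true once the period has decayed to zero so the caller can delete the
 * entry.
 */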
265 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
266 {
267 u64 prev_period = he->stat.period;
268 u64 diff;
269
270 if (prev_period == 0)
271 return true;
272
273 he_stat__decay(&he->stat);
274 if (symbol_conf.cumulate_callchain)
275 he_stat__decay(he->stat_acc);
276 decay_callchain(he->callchain);
277
278 diff = prev_period - he->stat.period;
279
280 if (!he->depth) {
281 hists->stats.total_period -= diff;
282 if (!he->filtered)
283 hists->stats.total_non_filtered_period -= diff;
284 }
285
286 if (!he->leaf) {
287 struct hist_entry *child;
288 struct rb_node *node = rb_first(&he->hroot_out);
289 while (node) {
290 child = rb_entry(node, struct hist_entry, rb_node);
291 node = rb_next(node);
292
293 if (hists__decay_entry(hists, child))
294 hists__delete_entry(hists, child);
295 }
296 }
297
298 return he->stat.period == 0;
299 }
300
301 static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
302 {
303 struct rb_root *root_in;
304 struct rb_root *root_out;
305
306 if (he->parent_he) {
307 root_in = &he->parent_he->hroot_in;
308 root_out = &he->parent_he->hroot_out;
309 } else {
310 if (hists__has(hists, need_collapse))
311 root_in = &hists->entries_collapsed;
312 else
313 root_in = hists->entries_in;
314 root_out = &hists->entries;
315 }
316
317 rb_erase(&he->rb_node_in, root_in);
318 rb_erase(&he->rb_node, root_out);
319
320 --hists->nr_entries;
321 if (!he->filtered)
322 --hists->nr_non_filtered_entries;
323
324 hist_entry__delete(he);
325 }
326
327 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
328 {
329 struct rb_node *next = rb_first(&hists->entries);
330 struct hist_entry *n;
331
332 while (next) {
333 n = rb_entry(next, struct hist_entry, rb_node);
334 next = rb_next(&n->rb_node);
335 if (((zap_user && n->level == '.') ||
336 (zap_kernel && n->level != '.') ||
337 hists__decay_entry(hists, n))) {
338 hists__delete_entry(hists, n);
339 }
340 }
341 }
342
343 void hists__delete_entries(struct hists *hists)
344 {
345 struct rb_node *next = rb_first(&hists->entries);
346 struct hist_entry *n;
347
348 while (next) {
349 n = rb_entry(next, struct hist_entry, rb_node);
350 next = rb_next(&n->rb_node);
351
352 hists__delete_entry(hists, n);
353 }
354 }
355
356 /*
357 * histogram, sorted on item, collects periods
358 */
359
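/*
 * Initialize a new hist_entry from 'template'.  Fields the template only
 * borrows (branch_info, raw_data) are duplicated, and references are taken
 * on the maps and the thread so they outlive the original sample.
 */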
360 static int hist_entry__init(struct hist_entry *he,
361 struct hist_entry *template,
362 bool sample_self)
363 {
364 *he = *template;
365
366 if (symbol_conf.cumulate_callchain) {
367 he->stat_acc = malloc(sizeof(he->stat));
368 if (he->stat_acc == NULL)
369 return -ENOMEM;
370 memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
371 if (!sample_self)
372 memset(&he->stat, 0, sizeof(he->stat));
373 }
374
375 map__get(he->ms.map);
376
377 if (he->branch_info) {
378 /*
379 * This branch info is (part of) what was allocated by
380 * sample__resolve_bstack() and will be freed after adding new
381 * entries, so we need to save a copy.
382 */
383 he->branch_info = malloc(sizeof(*he->branch_info));
384 if (he->branch_info == NULL) {
385 map__zput(he->ms.map);
386 free(he->stat_acc);
387 return -ENOMEM;
388 }
389
390 memcpy(he->branch_info, template->branch_info,
391 sizeof(*he->branch_info));
392
393 map__get(he->branch_info->from.map);
394 map__get(he->branch_info->to.map);
395 }
396
397 if (he->mem_info) {
398 map__get(he->mem_info->iaddr.map);
399 map__get(he->mem_info->daddr.map);
400 }
401
402 if (symbol_conf.use_callchain)
403 callchain_init(he->callchain);
404
405 if (he->raw_data) {
406 he->raw_data = memdup(he->raw_data, he->raw_size);
407
408 if (he->raw_data == NULL) {
409 map__put(he->ms.map);
410 if (he->branch_info) {
411 map__put(he->branch_info->from.map);
412 map__put(he->branch_info->to.map);
413 free(he->branch_info);
414 }
415 if (he->mem_info) {
416 map__put(he->mem_info->iaddr.map);
417 map__put(he->mem_info->daddr.map);
418 }
419 free(he->stat_acc);
420 return -ENOMEM;
421 }
422 }
423 INIT_LIST_HEAD(&he->pairs.node);
424 thread__get(he->thread);
425 he->hroot_in = RB_ROOT;
426 he->hroot_out = RB_ROOT;
427
428 if (!symbol_conf.report_hierarchy)
429 he->leaf = true;
430
431 return 0;
432 }
433
434 static void *hist_entry__zalloc(size_t size)
435 {
436 return zalloc(size + sizeof(struct hist_entry));
437 }
438
439 static void hist_entry__free(void *ptr)
440 {
441 free(ptr);
442 }
443
444 static struct hist_entry_ops default_ops = {
445 .new = hist_entry__zalloc,
446 .free = hist_entry__free,
447 };
448
449 static struct hist_entry *hist_entry__new(struct hist_entry *template,
450 bool sample_self)
451 {
452 struct hist_entry_ops *ops = template->ops;
453 size_t callchain_size = 0;
454 struct hist_entry *he;
455 int err = 0;
456
457 if (!ops)
458 ops = template->ops = &default_ops;
459
460 if (symbol_conf.use_callchain)
461 callchain_size = sizeof(struct callchain_root);
462
463 he = ops->new(callchain_size);
464 if (he) {
465 err = hist_entry__init(he, template, sample_self);
466 if (err) {
467 ops->free(he);
468 he = NULL;
469 }
470 }
471
472 return he;
473 }
474
475 static u8 symbol__parent_filter(const struct symbol *parent)
476 {
477 if (symbol_conf.exclude_other && parent == NULL)
478 return 1 << HIST_FILTER__PARENT;
479 return 0;
480 }
481
482 static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
483 {
484 if (!symbol_conf.use_callchain)
485 return;
486
487 he->hists->callchain_period += period;
488 if (!he->filtered)
489 he->hists->callchain_non_filtered_period += period;
490 }
491
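/*
 * Look up 'entry' in hists->entries_in using hist_entry__cmp().  On a match
 * the period/weight are accumulated into the existing entry; otherwise a
 * new hist_entry is created from the template and linked into the tree.
 */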
492 static struct hist_entry *hists__findnew_entry(struct hists *hists,
493 struct hist_entry *entry,
494 struct addr_location *al,
495 bool sample_self)
496 {
497 struct rb_node **p;
498 struct rb_node *parent = NULL;
499 struct hist_entry *he;
500 int64_t cmp;
501 u64 period = entry->stat.period;
502 u64 weight = entry->stat.weight;
503
504 p = &hists->entries_in->rb_node;
505
506 while (*p != NULL) {
507 parent = *p;
508 he = rb_entry(parent, struct hist_entry, rb_node_in);
509
510 /*
511 * Make sure that it receives arguments in the same order as
512 * hist_entry__collapse() so that we can use an appropriate
513 * function when searching an entry regardless of which sort
514 * keys were used.
515 */
516 cmp = hist_entry__cmp(he, entry);
517
518 if (!cmp) {
519 if (sample_self) {
520 he_stat__add_period(&he->stat, period, weight);
521 hist_entry__add_callchain_period(he, period);
522 }
523 if (symbol_conf.cumulate_callchain)
524 he_stat__add_period(he->stat_acc, period, weight);
525
526 /*
527 * This mem info was allocated from sample__resolve_mem
528 * and will not be used anymore.
529 */
530 zfree(&entry->mem_info);
531
532 /* If the map of an existing hist_entry has
533 * become out-of-date due to an exec() or
534 * similar, update it. Otherwise we will
535 * mis-adjust symbol addresses when computing
536 * the history counter to increment.
537 */
538 if (he->ms.map != entry->ms.map) {
539 map__put(he->ms.map);
540 he->ms.map = map__get(entry->ms.map);
541 }
542 goto out;
543 }
544
545 if (cmp < 0)
546 p = &(*p)->rb_left;
547 else
548 p = &(*p)->rb_right;
549 }
550
551 he = hist_entry__new(entry, sample_self);
552 if (!he)
553 return NULL;
554
555 if (sample_self)
556 hist_entry__add_callchain_period(he, period);
557 hists->nr_entries++;
558
559 rb_link_node(&he->rb_node_in, parent, p);
560 rb_insert_color(&he->rb_node_in, hists->entries_in);
561 out:
562 if (sample_self)
563 he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
564 if (symbol_conf.cumulate_callchain)
565 he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
566 return he;
567 }
568
569 static struct hist_entry*
570 __hists__add_entry(struct hists *hists,
571 struct addr_location *al,
572 struct symbol *sym_parent,
573 struct branch_info *bi,
574 struct mem_info *mi,
575 struct perf_sample *sample,
576 bool sample_self,
577 struct hist_entry_ops *ops)
578 {
579 struct namespaces *ns = thread__namespaces(al->thread);
580 struct hist_entry entry = {
581 .thread = al->thread,
582 .comm = thread__comm(al->thread),
583 .cgroup_id = {
584 .dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
585 .ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
586 },
587 .ms = {
588 .map = al->map,
589 .sym = al->sym,
590 },
591 .socket = al->socket,
592 .cpu = al->cpu,
593 .cpumode = al->cpumode,
594 .ip = al->addr,
595 .level = al->level,
596 .stat = {
597 .nr_events = 1,
598 .period = sample->period,
599 .weight = sample->weight,
600 },
601 .parent = sym_parent,
602 .filtered = symbol__parent_filter(sym_parent) | al->filtered,
603 .hists = hists,
604 .branch_info = bi,
605 .mem_info = mi,
606 .transaction = sample->transaction,
607 .raw_data = sample->raw_data,
608 .raw_size = sample->raw_size,
609 .ops = ops,
610 };
611
612 return hists__findnew_entry(hists, &entry, al, sample_self);
613 }
614
615 struct hist_entry *hists__add_entry(struct hists *hists,
616 struct addr_location *al,
617 struct symbol *sym_parent,
618 struct branch_info *bi,
619 struct mem_info *mi,
620 struct perf_sample *sample,
621 bool sample_self)
622 {
623 return __hists__add_entry(hists, al, sym_parent, bi, mi,
624 sample, sample_self, NULL);
625 }
626
627 struct hist_entry *hists__add_entry_ops(struct hists *hists,
628 struct hist_entry_ops *ops,
629 struct addr_location *al,
630 struct symbol *sym_parent,
631 struct branch_info *bi,
632 struct mem_info *mi,
633 struct perf_sample *sample,
634 bool sample_self)
635 {
636 return __hists__add_entry(hists, al, sym_parent, bi, mi,
637 sample, sample_self, ops);
638 }
639
640 static int
641 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
642 struct addr_location *al __maybe_unused)
643 {
644 return 0;
645 }
646
647 static int
648 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
649 struct addr_location *al __maybe_unused)
650 {
651 return 0;
652 }
653
654 static int
655 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
656 {
657 struct perf_sample *sample = iter->sample;
658 struct mem_info *mi;
659
660 mi = sample__resolve_mem(sample, al);
661 if (mi == NULL)
662 return -ENOMEM;
663
664 iter->priv = mi;
665 return 0;
666 }
667
668 static int
669 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
670 {
671 u64 cost;
672 struct mem_info *mi = iter->priv;
673 struct hists *hists = evsel__hists(iter->evsel);
674 struct perf_sample *sample = iter->sample;
675 struct hist_entry *he;
676
677 if (mi == NULL)
678 return -EINVAL;
679
680 cost = sample->weight;
681 if (!cost)
682 cost = 1;
683
684 /*
685 * must pass period=weight in order to get the correct
686 * sorting from hists__collapse_resort() which is solely
687 * based on periods. We want sorting to be done on nr_events * weight
688 * and this is indirectly achieved by passing period=weight here
689 * and the he_stat__add_period() function.
690 */
691 sample->period = cost;
692
693 he = hists__add_entry(hists, al, iter->parent, NULL, mi,
694 sample, true);
695 if (!he)
696 return -ENOMEM;
697
698 iter->he = he;
699 return 0;
700 }
701
702 static int
703 iter_finish_mem_entry(struct hist_entry_iter *iter,
704 struct addr_location *al __maybe_unused)
705 {
706 struct perf_evsel *evsel = iter->evsel;
707 struct hists *hists = evsel__hists(evsel);
708 struct hist_entry *he = iter->he;
709 int err = -EINVAL;
710
711 if (he == NULL)
712 goto out;
713
714 hists__inc_nr_samples(hists, he->filtered);
715
716 err = hist_entry__append_callchain(he, iter->sample);
717
718 out:
719 /*
720 * We don't need to free iter->priv (mem_info) here since the mem info
721 * was either already freed in hists__findnew_entry() or passed to a
722 * new hist entry by hist_entry__new().
723 */
724 iter->priv = NULL;
725
726 iter->he = NULL;
727 return err;
728 }
729
730 static int
731 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
732 {
733 struct branch_info *bi;
734 struct perf_sample *sample = iter->sample;
735
736 bi = sample__resolve_bstack(sample, al);
737 if (!bi)
738 return -ENOMEM;
739
740 iter->curr = 0;
741 iter->total = sample->branch_stack->nr;
742
743 iter->priv = bi;
744 return 0;
745 }
746
747 static int
748 iter_add_single_branch_entry(struct hist_entry_iter *iter,
749 struct addr_location *al __maybe_unused)
750 {
751 /* to avoid calling callback function */
752 iter->he = NULL;
753
754 return 0;
755 }
756
757 static int
758 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
759 {
760 struct branch_info *bi = iter->priv;
761 int i = iter->curr;
762
763 if (bi == NULL)
764 return 0;
765
766 if (iter->curr >= iter->total)
767 return 0;
768
769 al->map = bi[i].to.map;
770 al->sym = bi[i].to.sym;
771 al->addr = bi[i].to.addr;
772 return 1;
773 }
774
775 static int
776 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
777 {
778 struct branch_info *bi;
779 struct perf_evsel *evsel = iter->evsel;
780 struct hists *hists = evsel__hists(evsel);
781 struct perf_sample *sample = iter->sample;
782 struct hist_entry *he = NULL;
783 int i = iter->curr;
784 int err = 0;
785
786 bi = iter->priv;
787
788 if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
789 goto out;
790
791 /*
792 * The report shows the percentage of total branches captured
793 * and not events sampled. Thus we use a pseudo period of 1.
794 */
795 sample->period = 1;
796 sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
797
798 he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
799 sample, true);
800 if (he == NULL)
801 return -ENOMEM;
802
803 hists__inc_nr_samples(hists, he->filtered);
804
805 out:
806 iter->he = he;
807 iter->curr++;
808 return err;
809 }
810
811 static int
812 iter_finish_branch_entry(struct hist_entry_iter *iter,
813 struct addr_location *al __maybe_unused)
814 {
815 zfree(&iter->priv);
816 iter->he = NULL;
817
818 return iter->curr >= iter->total ? 0 : -1;
819 }
820
821 static int
822 iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
823 struct addr_location *al __maybe_unused)
824 {
825 return 0;
826 }
827
828 static int
829 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
830 {
831 struct perf_evsel *evsel = iter->evsel;
832 struct perf_sample *sample = iter->sample;
833 struct hist_entry *he;
834
835 he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
836 sample, true);
837 if (he == NULL)
838 return -ENOMEM;
839
840 iter->he = he;
841 return 0;
842 }
843
844 static int
845 iter_finish_normal_entry(struct hist_entry_iter *iter,
846 struct addr_location *al __maybe_unused)
847 {
848 struct hist_entry *he = iter->he;
849 struct perf_evsel *evsel = iter->evsel;
850 struct perf_sample *sample = iter->sample;
851
852 if (he == NULL)
853 return 0;
854
855 iter->he = NULL;
856
857 hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
858
859 return hist_entry__append_callchain(he, sample);
860 }
861
862 static int
863 iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
864 struct addr_location *al __maybe_unused)
865 {
866 struct hist_entry **he_cache;
867
868 callchain_cursor_commit(&callchain_cursor);
869
870 /*
871 * This is for detecting cycles or recursion so that they're
872 * cumulated only once, preventing any entry from exceeding 100%
873 * overhead.
874 */
875 he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
876 if (he_cache == NULL)
877 return -ENOMEM;
878
879 iter->priv = he_cache;
880 iter->curr = 0;
881
882 return 0;
883 }
884
885 static int
886 iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
887 struct addr_location *al)
888 {
889 struct perf_evsel *evsel = iter->evsel;
890 struct hists *hists = evsel__hists(evsel);
891 struct perf_sample *sample = iter->sample;
892 struct hist_entry **he_cache = iter->priv;
893 struct hist_entry *he;
894 int err = 0;
895
896 he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
897 sample, true);
898 if (he == NULL)
899 return -ENOMEM;
900
901 iter->he = he;
902 he_cache[iter->curr++] = he;
903
904 hist_entry__append_callchain(he, sample);
905
906 /*
907 * We need to re-initialize the cursor since callchain_append()
908 * advanced the cursor to the end.
909 */
910 callchain_cursor_commit(&callchain_cursor);
911
912 hists__inc_nr_samples(hists, he->filtered);
913
914 return err;
915 }
916
917 static int
918 iter_next_cumulative_entry(struct hist_entry_iter *iter,
919 struct addr_location *al)
920 {
921 struct callchain_cursor_node *node;
922
923 node = callchain_cursor_current(&callchain_cursor);
924 if (node == NULL)
925 return 0;
926
927 return fill_callchain_info(al, node, iter->hide_unresolved);
928 }
929
930 static int
931 iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
932 struct addr_location *al)
933 {
934 struct perf_evsel *evsel = iter->evsel;
935 struct perf_sample *sample = iter->sample;
936 struct hist_entry **he_cache = iter->priv;
937 struct hist_entry *he;
938 struct hist_entry he_tmp = {
939 .hists = evsel__hists(evsel),
940 .cpu = al->cpu,
941 .thread = al->thread,
942 .comm = thread__comm(al->thread),
943 .ip = al->addr,
944 .ms = {
945 .map = al->map,
946 .sym = al->sym,
947 },
948 .parent = iter->parent,
949 .raw_data = sample->raw_data,
950 .raw_size = sample->raw_size,
951 };
952 int i;
953 struct callchain_cursor cursor;
954
955 callchain_cursor_snapshot(&cursor, &callchain_cursor);
956
957 callchain_cursor_advance(&callchain_cursor);
958
959 /*
960 * Check if there are duplicate entries in the callchain.
961 * It's possible that it has cycles or recursive calls.
962 */
963 for (i = 0; i < iter->curr; i++) {
964 if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
965 /* to avoid calling callback function */
966 iter->he = NULL;
967 return 0;
968 }
969 }
970
971 he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
972 sample, false);
973 if (he == NULL)
974 return -ENOMEM;
975
976 iter->he = he;
977 he_cache[iter->curr++] = he;
978
979 if (symbol_conf.use_callchain)
980 callchain_append(he->callchain, &cursor, sample->period);
981 return 0;
982 }
983
984 static int
985 iter_finish_cumulative_entry(struct hist_entry_iter *iter,
986 struct addr_location *al __maybe_unused)
987 {
988 zfree(&iter->priv);
989 iter->he = NULL;
990
991 return 0;
992 }
993
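/*
 * Per-sample-type callback tables; hist_entry_iter__add() below drives them
 * in the order prepare_entry, add_single_entry, then next_entry/
 * add_next_entry in a loop, and finally finish_entry.
 */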
994 const struct hist_iter_ops hist_iter_mem = {
995 .prepare_entry = iter_prepare_mem_entry,
996 .add_single_entry = iter_add_single_mem_entry,
997 .next_entry = iter_next_nop_entry,
998 .add_next_entry = iter_add_next_nop_entry,
999 .finish_entry = iter_finish_mem_entry,
1000 };
1001
1002 const struct hist_iter_ops hist_iter_branch = {
1003 .prepare_entry = iter_prepare_branch_entry,
1004 .add_single_entry = iter_add_single_branch_entry,
1005 .next_entry = iter_next_branch_entry,
1006 .add_next_entry = iter_add_next_branch_entry,
1007 .finish_entry = iter_finish_branch_entry,
1008 };
1009
1010 const struct hist_iter_ops hist_iter_normal = {
1011 .prepare_entry = iter_prepare_normal_entry,
1012 .add_single_entry = iter_add_single_normal_entry,
1013 .next_entry = iter_next_nop_entry,
1014 .add_next_entry = iter_add_next_nop_entry,
1015 .finish_entry = iter_finish_normal_entry,
1016 };
1017
1018 const struct hist_iter_ops hist_iter_cumulative = {
1019 .prepare_entry = iter_prepare_cumulative_entry,
1020 .add_single_entry = iter_add_single_cumulative_entry,
1021 .next_entry = iter_next_cumulative_entry,
1022 .add_next_entry = iter_add_next_cumulative_entry,
1023 .finish_entry = iter_finish_cumulative_entry,
1024 };
1025
1026 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
1027 int max_stack_depth, void *arg)
1028 {
1029 int err, err2;
1030 struct map *alm = NULL;
1031
1032 if (al && al->map)
1033 alm = map__get(al->map);
1034
1035 err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
1036 iter->evsel, al, max_stack_depth);
1037 if (err)
1038 return err;
1039
1040 iter->max_stack = max_stack_depth;
1041
1042 err = iter->ops->prepare_entry(iter, al);
1043 if (err)
1044 goto out;
1045
1046 err = iter->ops->add_single_entry(iter, al);
1047 if (err)
1048 goto out;
1049
1050 if (iter->he && iter->add_entry_cb) {
1051 err = iter->add_entry_cb(iter, al, true, arg);
1052 if (err)
1053 goto out;
1054 }
1055
1056 while (iter->ops->next_entry(iter, al)) {
1057 err = iter->ops->add_next_entry(iter, al);
1058 if (err)
1059 break;
1060
1061 if (iter->he && iter->add_entry_cb) {
1062 err = iter->add_entry_cb(iter, al, false, arg);
1063 if (err)
1064 goto out;
1065 }
1066 }
1067
1068 out:
1069 err2 = iter->ops->finish_entry(iter, al);
1070 if (!err)
1071 err = err2;
1072
1073 map__put(alm);
1074
1075 return err;
1076 }
1077
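/*
 * Three comparator walks over the configured sort keys: hist_entry__cmp()
 * uses fmt->cmp for input insertion, hist_entry__collapse() uses
 * fmt->collapse when merging entries, and hist_entry__sort() further below
 * uses fmt->sort for the final output order.
 */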
1078 int64_t
1079 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
1080 {
1081 struct hists *hists = left->hists;
1082 struct perf_hpp_fmt *fmt;
1083 int64_t cmp = 0;
1084
1085 hists__for_each_sort_list(hists, fmt) {
1086 if (perf_hpp__is_dynamic_entry(fmt) &&
1087 !perf_hpp__defined_dynamic_entry(fmt, hists))
1088 continue;
1089
1090 cmp = fmt->cmp(fmt, left, right);
1091 if (cmp)
1092 break;
1093 }
1094
1095 return cmp;
1096 }
1097
1098 int64_t
1099 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
1100 {
1101 struct hists *hists = left->hists;
1102 struct perf_hpp_fmt *fmt;
1103 int64_t cmp = 0;
1104
1105 hists__for_each_sort_list(hists, fmt) {
1106 if (perf_hpp__is_dynamic_entry(fmt) &&
1107 !perf_hpp__defined_dynamic_entry(fmt, hists))
1108 continue;
1109
1110 cmp = fmt->collapse(fmt, left, right);
1111 if (cmp)
1112 break;
1113 }
1114
1115 return cmp;
1116 }
1117
1118 void hist_entry__delete(struct hist_entry *he)
1119 {
1120 struct hist_entry_ops *ops = he->ops;
1121
1122 thread__zput(he->thread);
1123 map__zput(he->ms.map);
1124
1125 if (he->branch_info) {
1126 map__zput(he->branch_info->from.map);
1127 map__zput(he->branch_info->to.map);
1128 free_srcline(he->branch_info->srcline_from);
1129 free_srcline(he->branch_info->srcline_to);
1130 zfree(&he->branch_info);
1131 }
1132
1133 if (he->mem_info) {
1134 map__zput(he->mem_info->iaddr.map);
1135 map__zput(he->mem_info->daddr.map);
1136 zfree(&he->mem_info);
1137 }
1138
1139 zfree(&he->stat_acc);
1140 free_srcline(he->srcline);
1141 if (he->srcfile && he->srcfile[0])
1142 free(he->srcfile);
1143 free_callchain(he->callchain);
1144 free(he->trace_output);
1145 free(he->raw_data);
1146 ops->free(he);
1147 }
1148
1149 /*
1150 * If this is not the last column, then we need to pad it according to the
1151 * pre-calculated max length for this column, otherwise don't bother adding
1152 * spaces because that would break viewing this with, for instance, 'less',
1153 * which would show tons of trailing spaces when a long C++ demangled method
1154 * name is sampled.
1155 */
1156 int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1157 struct perf_hpp_fmt *fmt, int printed)
1158 {
1159 if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
1160 const int width = fmt->width(fmt, hpp, he->hists);
1161 if (printed < width) {
1162 advance_hpp(hpp, printed);
1163 printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1164 }
1165 }
1166
1167 return printed;
1168 }
1169
1170 /*
1171 * collapse the histogram
1172 */
1173
1174 static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
1175 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
1176 enum hist_filter type);
1177
1178 typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
1179
1180 static bool check_thread_entry(struct perf_hpp_fmt *fmt)
1181 {
1182 return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
1183 }
1184
1185 static void hist_entry__check_and_remove_filter(struct hist_entry *he,
1186 enum hist_filter type,
1187 fmt_chk_fn check)
1188 {
1189 struct perf_hpp_fmt *fmt;
1190 bool type_match = false;
1191 struct hist_entry *parent = he->parent_he;
1192
1193 switch (type) {
1194 case HIST_FILTER__THREAD:
1195 if (symbol_conf.comm_list == NULL &&
1196 symbol_conf.pid_list == NULL &&
1197 symbol_conf.tid_list == NULL)
1198 return;
1199 break;
1200 case HIST_FILTER__DSO:
1201 if (symbol_conf.dso_list == NULL)
1202 return;
1203 break;
1204 case HIST_FILTER__SYMBOL:
1205 if (symbol_conf.sym_list == NULL)
1206 return;
1207 break;
1208 case HIST_FILTER__PARENT:
1209 case HIST_FILTER__GUEST:
1210 case HIST_FILTER__HOST:
1211 case HIST_FILTER__SOCKET:
1212 case HIST_FILTER__C2C:
1213 default:
1214 return;
1215 }
1216
1217 /* if it's filtered by its own fmt, it has to have the filter bits */
1218 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1219 if (check(fmt)) {
1220 type_match = true;
1221 break;
1222 }
1223 }
1224
1225 if (type_match) {
1226 /*
1227 * If the filter is for the current level entry, propagate the
1228 * filter marker to parents.  The marker bit was already set by
1229 * default, so it only needs to be cleared for non-filtered
1230 * entries.
1231 */
1232 if (!(he->filtered & (1 << type))) {
1233 while (parent) {
1234 parent->filtered &= ~(1 << type);
1235 parent = parent->parent_he;
1236 }
1237 }
1238 } else {
1239 /*
1240 * If the current entry doesn't have matching formats, set
1241 * the filter marker for upper level entries.  It will be
1242 * cleared if its lower level entries are not filtered.
1243 *
1244 * For lower level entries, inherit the parent's filter bit
1245 * so that lower level entries of a non-filtered entry
1246 * won't set the filter marker.
1247 */
1248 if (parent == NULL)
1249 he->filtered |= (1 << type);
1250 else
1251 he->filtered |= (parent->filtered & (1 << type));
1252 }
1253 }
1254
1255 static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
1256 {
1257 hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
1258 check_thread_entry);
1259
1260 hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
1261 perf_hpp__is_dso_entry);
1262
1263 hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
1264 perf_hpp__is_sym_entry);
1265
1266 hists__apply_filters(he->hists, he);
1267 }
1268
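/*
 * Insert a copy of 'he' into one level of the hierarchy tree, keyed by the
 * formats in 'hpp_list'.  If an equal entry already exists, its stats are
 * merged instead of allocating a new one.
 */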
1269 static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
1270 struct rb_root *root,
1271 struct hist_entry *he,
1272 struct hist_entry *parent_he,
1273 struct perf_hpp_list *hpp_list)
1274 {
1275 struct rb_node **p = &root->rb_node;
1276 struct rb_node *parent = NULL;
1277 struct hist_entry *iter, *new;
1278 struct perf_hpp_fmt *fmt;
1279 int64_t cmp;
1280
1281 while (*p != NULL) {
1282 parent = *p;
1283 iter = rb_entry(parent, struct hist_entry, rb_node_in);
1284
1285 cmp = 0;
1286 perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1287 cmp = fmt->collapse(fmt, iter, he);
1288 if (cmp)
1289 break;
1290 }
1291
1292 if (!cmp) {
1293 he_stat__add_stat(&iter->stat, &he->stat);
1294 return iter;
1295 }
1296
1297 if (cmp < 0)
1298 p = &parent->rb_left;
1299 else
1300 p = &parent->rb_right;
1301 }
1302
1303 new = hist_entry__new(he, true);
1304 if (new == NULL)
1305 return NULL;
1306
1307 hists->nr_entries++;
1308
1309 /* save related format list for output */
1310 new->hpp_list = hpp_list;
1311 new->parent_he = parent_he;
1312
1313 hist_entry__apply_hierarchy_filters(new);
1314
1315 /* some fields are now passed to 'new' */
1316 perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1317 if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1318 he->trace_output = NULL;
1319 else
1320 new->trace_output = NULL;
1321
1322 if (perf_hpp__is_srcline_entry(fmt))
1323 he->srcline = NULL;
1324 else
1325 new->srcline = NULL;
1326
1327 if (perf_hpp__is_srcfile_entry(fmt))
1328 he->srcfile = NULL;
1329 else
1330 new->srcfile = NULL;
1331 }
1332
1333 rb_link_node(&new->rb_node_in, parent, p);
1334 rb_insert_color(&new->rb_node_in, root);
1335 return new;
1336 }
1337
1338 static int hists__hierarchy_insert_entry(struct hists *hists,
1339 struct rb_root *root,
1340 struct hist_entry *he)
1341 {
1342 struct perf_hpp_list_node *node;
1343 struct hist_entry *new_he = NULL;
1344 struct hist_entry *parent = NULL;
1345 int depth = 0;
1346 int ret = 0;
1347
1348 list_for_each_entry(node, &hists->hpp_formats, list) {
1349 /* skip period (overhead) and elided columns */
1350 if (node->level == 0 || node->skip)
1351 continue;
1352
1353 /* insert copy of 'he' for each fmt into the hierarchy */
1354 new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
1355 if (new_he == NULL) {
1356 ret = -1;
1357 break;
1358 }
1359
1360 root = &new_he->hroot_in;
1361 new_he->depth = depth++;
1362 parent = new_he;
1363 }
1364
1365 if (new_he) {
1366 new_he->leaf = true;
1367
1368 if (symbol_conf.use_callchain) {
1369 callchain_cursor_reset(&callchain_cursor);
1370 if (callchain_merge(&callchain_cursor,
1371 new_he->callchain,
1372 he->callchain) < 0)
1373 ret = -1;
1374 }
1375 }
1376
1377 /* 'he' is no longer used */
1378 hist_entry__delete(he);
1379
1380 /* return 0 (or -1) since the filters have already been applied */
1381 return ret;
1382 }
1383
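/*
 * Merge 'he' into the collapsed tree.  Returns 1 if it was inserted as a
 * new node, 0 if it was merged into an existing entry (and freed), or -1
 * if merging its callchain failed.
 */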
1384 static int hists__collapse_insert_entry(struct hists *hists,
1385 struct rb_root *root,
1386 struct hist_entry *he)
1387 {
1388 struct rb_node **p = &root->rb_node;
1389 struct rb_node *parent = NULL;
1390 struct hist_entry *iter;
1391 int64_t cmp;
1392
1393 if (symbol_conf.report_hierarchy)
1394 return hists__hierarchy_insert_entry(hists, root, he);
1395
1396 while (*p != NULL) {
1397 parent = *p;
1398 iter = rb_entry(parent, struct hist_entry, rb_node_in);
1399
1400 cmp = hist_entry__collapse(iter, he);
1401
1402 if (!cmp) {
1403 int ret = 0;
1404
1405 he_stat__add_stat(&iter->stat, &he->stat);
1406 if (symbol_conf.cumulate_callchain)
1407 he_stat__add_stat(iter->stat_acc, he->stat_acc);
1408
1409 if (symbol_conf.use_callchain) {
1410 callchain_cursor_reset(&callchain_cursor);
1411 if (callchain_merge(&callchain_cursor,
1412 iter->callchain,
1413 he->callchain) < 0)
1414 ret = -1;
1415 }
1416 hist_entry__delete(he);
1417 return ret;
1418 }
1419
1420 if (cmp < 0)
1421 p = &(*p)->rb_left;
1422 else
1423 p = &(*p)->rb_right;
1424 }
1425 hists->nr_entries++;
1426
1427 rb_link_node(&he->rb_node_in, parent, p);
1428 rb_insert_color(&he->rb_node_in, root);
1429 return 1;
1430 }
1431
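/*
 * hists->entries_in is double buffered (entries_in_array[2]): swap the
 * active input tree under hists->lock and return the one that was being
 * filled, so it can be collapsed while new samples land in the other tree.
 */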
1432 struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
1433 {
1434 struct rb_root *root;
1435
1436 pthread_mutex_lock(&hists->lock);
1437
1438 root = hists->entries_in;
1439 if (++hists->entries_in > &hists->entries_in_array[1])
1440 hists->entries_in = &hists->entries_in_array[0];
1441
1442 pthread_mutex_unlock(&hists->lock);
1443
1444 return root;
1445 }
1446
1447 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1448 {
1449 hists__filter_entry_by_dso(hists, he);
1450 hists__filter_entry_by_thread(hists, he);
1451 hists__filter_entry_by_symbol(hists, he);
1452 hists__filter_entry_by_socket(hists, he);
1453 }
1454
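/*
 * Collapse pass: drain the rotated input tree and merge entries that
 * compare equal under the collapse keys into hists->entries_collapsed
 * (or into the hierarchy when --hierarchy is used).
 */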
1455 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1456 {
1457 struct rb_root *root;
1458 struct rb_node *next;
1459 struct hist_entry *n;
1460 int ret;
1461
1462 if (!hists__has(hists, need_collapse))
1463 return 0;
1464
1465 hists->nr_entries = 0;
1466
1467 root = hists__get_rotate_entries_in(hists);
1468
1469 next = rb_first(root);
1470
1471 while (next) {
1472 if (session_done())
1473 break;
1474 n = rb_entry(next, struct hist_entry, rb_node_in);
1475 next = rb_next(&n->rb_node_in);
1476
1477 rb_erase(&n->rb_node_in, root);
1478 ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1479 if (ret < 0)
1480 return -1;
1481
1482 if (ret) {
1483 /*
1484 * If it wasn't combined with one of the entries already
1485 * collapsed, we need to apply the filters that may have
1486 * been set by, say, the hist_browser.
1487 */
1488 hists__apply_filters(hists, n);
1489 }
1490 if (prog)
1491 ui_progress__update(prog, 1);
1492 }
1493 return 0;
1494 }
1495
1496 static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1497 {
1498 struct hists *hists = a->hists;
1499 struct perf_hpp_fmt *fmt;
1500 int64_t cmp = 0;
1501
1502 hists__for_each_sort_list(hists, fmt) {
1503 if (perf_hpp__should_skip(fmt, a->hists))
1504 continue;
1505
1506 cmp = fmt->sort(fmt, a, b);
1507 if (cmp)
1508 break;
1509 }
1510
1511 return cmp;
1512 }
1513
1514 static void hists__reset_filter_stats(struct hists *hists)
1515 {
1516 hists->nr_non_filtered_entries = 0;
1517 hists->stats.total_non_filtered_period = 0;
1518 }
1519
1520 void hists__reset_stats(struct hists *hists)
1521 {
1522 hists->nr_entries = 0;
1523 hists->stats.total_period = 0;
1524
1525 hists__reset_filter_stats(hists);
1526 }
1527
1528 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1529 {
1530 hists->nr_non_filtered_entries++;
1531 hists->stats.total_non_filtered_period += h->stat.period;
1532 }
1533
1534 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1535 {
1536 if (!h->filtered)
1537 hists__inc_filter_stats(hists, h);
1538
1539 hists->nr_entries++;
1540 hists->stats.total_period += h->stat.period;
1541 }
1542
1543 static void hierarchy_recalc_total_periods(struct hists *hists)
1544 {
1545 struct rb_node *node;
1546 struct hist_entry *he;
1547
1548 node = rb_first(&hists->entries);
1549
1550 hists->stats.total_period = 0;
1551 hists->stats.total_non_filtered_period = 0;
1552
1553 /*
1554 * recalculate total period using top-level entries only
1555 * since lower level entries only see non-filtered entries
1556 * but upper level entries have the sum of both.
1557 */
1558 while (node) {
1559 he = rb_entry(node, struct hist_entry, rb_node);
1560 node = rb_next(node);
1561
1562 hists->stats.total_period += he->stat.period;
1563 if (!he->filtered)
1564 hists->stats.total_non_filtered_period += he->stat.period;
1565 }
1566 }
1567
1568 static void hierarchy_insert_output_entry(struct rb_root *root,
1569 struct hist_entry *he)
1570 {
1571 struct rb_node **p = &root->rb_node;
1572 struct rb_node *parent = NULL;
1573 struct hist_entry *iter;
1574 struct perf_hpp_fmt *fmt;
1575
1576 while (*p != NULL) {
1577 parent = *p;
1578 iter = rb_entry(parent, struct hist_entry, rb_node);
1579
1580 if (hist_entry__sort(he, iter) > 0)
1581 p = &parent->rb_left;
1582 else
1583 p = &parent->rb_right;
1584 }
1585
1586 rb_link_node(&he->rb_node, parent, p);
1587 rb_insert_color(&he->rb_node, root);
1588
1589 /* update column width of dynamic entry */
1590 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1591 if (perf_hpp__is_dynamic_entry(fmt))
1592 fmt->sort(fmt, he, NULL);
1593 }
1594 }
1595
1596 static void hists__hierarchy_output_resort(struct hists *hists,
1597 struct ui_progress *prog,
1598 struct rb_root *root_in,
1599 struct rb_root *root_out,
1600 u64 min_callchain_hits,
1601 bool use_callchain)
1602 {
1603 struct rb_node *node;
1604 struct hist_entry *he;
1605
1606 *root_out = RB_ROOT;
1607 node = rb_first(root_in);
1608
1609 while (node) {
1610 he = rb_entry(node, struct hist_entry, rb_node_in);
1611 node = rb_next(node);
1612
1613 hierarchy_insert_output_entry(root_out, he);
1614
1615 if (prog)
1616 ui_progress__update(prog, 1);
1617
1618 hists->nr_entries++;
1619 if (!he->filtered) {
1620 hists->nr_non_filtered_entries++;
1621 hists__calc_col_len(hists, he);
1622 }
1623
1624 if (!he->leaf) {
1625 hists__hierarchy_output_resort(hists, prog,
1626 &he->hroot_in,
1627 &he->hroot_out,
1628 min_callchain_hits,
1629 use_callchain);
1630 continue;
1631 }
1632
1633 if (!use_callchain)
1634 continue;
1635
1636 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1637 u64 total = he->stat.period;
1638
1639 if (symbol_conf.cumulate_callchain)
1640 total = he->stat_acc->period;
1641
1642 min_callchain_hits = total * (callchain_param.min_percent / 100);
1643 }
1644
1645 callchain_param.sort(&he->sorted_chain, he->callchain,
1646 min_callchain_hits, &callchain_param);
1647 }
1648 }
1649
1650 static void __hists__insert_output_entry(struct rb_root *entries,
1651 struct hist_entry *he,
1652 u64 min_callchain_hits,
1653 bool use_callchain)
1654 {
1655 struct rb_node **p = &entries->rb_node;
1656 struct rb_node *parent = NULL;
1657 struct hist_entry *iter;
1658 struct perf_hpp_fmt *fmt;
1659
1660 if (use_callchain) {
1661 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1662 u64 total = he->stat.period;
1663
1664 if (symbol_conf.cumulate_callchain)
1665 total = he->stat_acc->period;
1666
1667 min_callchain_hits = total * (callchain_param.min_percent / 100);
1668 }
1669 callchain_param.sort(&he->sorted_chain, he->callchain,
1670 min_callchain_hits, &callchain_param);
1671 }
1672
1673 while (*p != NULL) {
1674 parent = *p;
1675 iter = rb_entry(parent, struct hist_entry, rb_node);
1676
1677 if (hist_entry__sort(he, iter) > 0)
1678 p = &(*p)->rb_left;
1679 else
1680 p = &(*p)->rb_right;
1681 }
1682
1683 rb_link_node(&he->rb_node, parent, p);
1684 rb_insert_color(&he->rb_node, entries);
1685
1686 perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1687 if (perf_hpp__is_dynamic_entry(fmt) &&
1688 perf_hpp__defined_dynamic_entry(fmt, he->hists))
1689 fmt->sort(fmt, he, NULL); /* update column width */
1690 }
1691 }
1692
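/*
 * Output pass: move entries from the collapsed (or input) tree into
 * hists->entries, sorted by the output sort keys, recomputing the stats
 * and column widths as we go.  Entries for which 'cb' returns true are
 * skipped.
 */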
1693 static void output_resort(struct hists *hists, struct ui_progress *prog,
1694 bool use_callchain, hists__resort_cb_t cb)
1695 {
1696 struct rb_root *root;
1697 struct rb_node *next;
1698 struct hist_entry *n;
1699 u64 callchain_total;
1700 u64 min_callchain_hits;
1701
1702 callchain_total = hists->callchain_period;
1703 if (symbol_conf.filter_relative)
1704 callchain_total = hists->callchain_non_filtered_period;
1705
1706 min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1707
1708 hists__reset_stats(hists);
1709 hists__reset_col_len(hists);
1710
1711 if (symbol_conf.report_hierarchy) {
1712 hists__hierarchy_output_resort(hists, prog,
1713 &hists->entries_collapsed,
1714 &hists->entries,
1715 min_callchain_hits,
1716 use_callchain);
1717 hierarchy_recalc_total_periods(hists);
1718 return;
1719 }
1720
1721 if (hists__has(hists, need_collapse))
1722 root = &hists->entries_collapsed;
1723 else
1724 root = hists->entries_in;
1725
1726 next = rb_first(root);
1727 hists->entries = RB_ROOT;
1728
1729 while (next) {
1730 n = rb_entry(next, struct hist_entry, rb_node_in);
1731 next = rb_next(&n->rb_node_in);
1732
1733 if (cb && cb(n))
1734 continue;
1735
1736 __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1737 hists__inc_stats(hists, n);
1738
1739 if (!n->filtered)
1740 hists__calc_col_len(hists, n);
1741
1742 if (prog)
1743 ui_progress__update(prog, 1);
1744 }
1745 }
1746
1747 void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
1748 {
1749 bool use_callchain;
1750
1751 if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1752 use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
1753 else
1754 use_callchain = symbol_conf.use_callchain;
1755
1756 output_resort(evsel__hists(evsel), prog, use_callchain, NULL);
1757 }
1758
1759 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1760 {
1761 output_resort(hists, prog, symbol_conf.use_callchain, NULL);
1762 }
1763
1764 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
1765 hists__resort_cb_t cb)
1766 {
1767 output_resort(hists, prog, symbol_conf.use_callchain, cb);
1768 }
1769
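/*
 * Helpers for walking the hierarchy output tree in on-screen order,
 * descending into a node's children only when it is unfolded (or when
 * forced by the move direction).
 */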
1770 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
1771 {
1772 if (he->leaf || hmd == HMD_FORCE_SIBLING)
1773 return false;
1774
1775 if (he->unfolded || hmd == HMD_FORCE_CHILD)
1776 return true;
1777
1778 return false;
1779 }
1780
1781 struct rb_node *rb_hierarchy_last(struct rb_node *node)
1782 {
1783 struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1784
1785 while (can_goto_child(he, HMD_NORMAL)) {
1786 node = rb_last(&he->hroot_out);
1787 he = rb_entry(node, struct hist_entry, rb_node);
1788 }
1789 return node;
1790 }
1791
1792 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
1793 {
1794 struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1795
1796 if (can_goto_child(he, hmd))
1797 node = rb_first(&he->hroot_out);
1798 else
1799 node = rb_next(node);
1800
1801 while (node == NULL) {
1802 he = he->parent_he;
1803 if (he == NULL)
1804 break;
1805
1806 node = rb_next(&he->rb_node);
1807 }
1808 return node;
1809 }
1810
1811 struct rb_node *rb_hierarchy_prev(struct rb_node *node)
1812 {
1813 struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1814
1815 node = rb_prev(node);
1816 if (node)
1817 return rb_hierarchy_last(node);
1818
1819 he = he->parent_he;
1820 if (he == NULL)
1821 return NULL;
1822
1823 return &he->rb_node;
1824 }
1825
1826 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
1827 {
1828 struct rb_node *node;
1829 struct hist_entry *child;
1830 float percent;
1831
1832 if (he->leaf)
1833 return false;
1834
1835 node = rb_first(&he->hroot_out);
1836 child = rb_entry(node, struct hist_entry, rb_node);
1837
1838 while (node && child->filtered) {
1839 node = rb_next(node);
1840 child = rb_entry(node, struct hist_entry, rb_node);
1841 }
1842
1843 if (node)
1844 percent = hist_entry__get_percent_limit(child);
1845 else
1846 percent = 0;
1847
1848 return node && percent >= limit;
1849 }
1850
1851 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
1852 enum hist_filter filter)
1853 {
1854 h->filtered &= ~(1 << filter);
1855
1856 if (symbol_conf.report_hierarchy) {
1857 struct hist_entry *parent = h->parent_he;
1858
1859 while (parent) {
1860 he_stat__add_stat(&parent->stat, &h->stat);
1861
1862 parent->filtered &= ~(1 << filter);
1863
1864 if (parent->filtered)
1865 goto next;
1866
1867 /* force fold unfiltered entry for simplicity */
1868 parent->unfolded = false;
1869 parent->has_no_entry = false;
1870 parent->row_offset = 0;
1871 parent->nr_rows = 0;
1872 next:
1873 parent = parent->parent_he;
1874 }
1875 }
1876
1877 if (h->filtered)
1878 return;
1879
1880 /* force fold unfiltered entry for simplicity */
1881 h->unfolded = false;
1882 h->has_no_entry = false;
1883 h->row_offset = 0;
1884 h->nr_rows = 0;
1885
1886 hists->stats.nr_non_filtered_samples += h->stat.nr_events;
1887
1888 hists__inc_filter_stats(hists, h);
1889 hists__calc_col_len(hists, h);
1890 }
1891
1892
1893 static bool hists__filter_entry_by_dso(struct hists *hists,
1894 struct hist_entry *he)
1895 {
1896 if (hists->dso_filter != NULL &&
1897 (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
1898 he->filtered |= (1 << HIST_FILTER__DSO);
1899 return true;
1900 }
1901
1902 return false;
1903 }
1904
1905 static bool hists__filter_entry_by_thread(struct hists *hists,
1906 struct hist_entry *he)
1907 {
1908 if (hists->thread_filter != NULL &&
1909 he->thread != hists->thread_filter) {
1910 he->filtered |= (1 << HIST_FILTER__THREAD);
1911 return true;
1912 }
1913
1914 return false;
1915 }
1916
1917 static bool hists__filter_entry_by_symbol(struct hists *hists,
1918 struct hist_entry *he)
1919 {
1920 if (hists->symbol_filter_str != NULL &&
1921 (!he->ms.sym || strstr(he->ms.sym->name,
1922 hists->symbol_filter_str) == NULL)) {
1923 he->filtered |= (1 << HIST_FILTER__SYMBOL);
1924 return true;
1925 }
1926
1927 return false;
1928 }
1929
1930 static bool hists__filter_entry_by_socket(struct hists *hists,
1931 struct hist_entry *he)
1932 {
1933 if ((hists->socket_filter > -1) &&
1934 (he->socket != hists->socket_filter)) {
1935 he->filtered |= (1 << HIST_FILTER__SOCKET);
1936 return true;
1937 }
1938
1939 return false;
1940 }
1941
1942 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
1943
1944 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
1945 {
1946 struct rb_node *nd;
1947
1948 hists->stats.nr_non_filtered_samples = 0;
1949
1950 hists__reset_filter_stats(hists);
1951 hists__reset_col_len(hists);
1952
1953 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1954 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1955
1956 if (filter(hists, h))
1957 continue;
1958
1959 hists__remove_entry_filter(hists, h, type);
1960 }
1961 }
1962
1963 static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
1964 {
1965 struct rb_node **p = &root->rb_node;
1966 struct rb_node *parent = NULL;
1967 struct hist_entry *iter;
1968 struct rb_root new_root = RB_ROOT;
1969 struct rb_node *nd;
1970
1971 while (*p != NULL) {
1972 parent = *p;
1973 iter = rb_entry(parent, struct hist_entry, rb_node);
1974
1975 if (hist_entry__sort(he, iter) > 0)
1976 p = &(*p)->rb_left;
1977 else
1978 p = &(*p)->rb_right;
1979 }
1980
1981 rb_link_node(&he->rb_node, parent, p);
1982 rb_insert_color(&he->rb_node, root);
1983
1984 if (he->leaf || he->filtered)
1985 return;
1986
1987 nd = rb_first(&he->hroot_out);
1988 while (nd) {
1989 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1990
1991 nd = rb_next(nd);
1992 rb_erase(&h->rb_node, &he->hroot_out);
1993
1994 resort_filtered_entry(&new_root, h);
1995 }
1996
1997 he->hroot_out = new_root;
1998 }
1999
2000 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
2001 {
2002 struct rb_node *nd;
2003 struct rb_root new_root = RB_ROOT;
2004
2005 hists->stats.nr_non_filtered_samples = 0;
2006
2007 hists__reset_filter_stats(hists);
2008 hists__reset_col_len(hists);
2009
2010 nd = rb_first(&hists->entries);
2011 while (nd) {
2012 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2013 int ret;
2014
2015 ret = hist_entry__filter(h, type, arg);
2016
2017 /*
2018 * case 1. non-matching type
2019 * zero out the period, set filter marker and move to child
2020 */
2021 if (ret < 0) {
2022 memset(&h->stat, 0, sizeof(h->stat));
2023 h->filtered |= (1 << type);
2024
2025 nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
2026 }
2027 /*
2028 * case 2. matched type (filter out)
2029 * set filter marker and move to next
2030 */
2031 else if (ret == 1) {
2032 h->filtered |= (1 << type);
2033
2034 nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2035 }
2036 /*
2037 * case 3. ok (not filtered)
2038 * add period to hists and parents, erase the filter marker
2039 * and move to next sibling
2040 */
2041 else {
2042 hists__remove_entry_filter(hists, h, type);
2043
2044 nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2045 }
2046 }
2047
2048 hierarchy_recalc_total_periods(hists);
2049
2050 /*
2051 * resort output after applying a new filter since a filter in a lower
2052 * hierarchy can change periods in an upper hierarchy.
2053 */
2054 nd = rb_first(&hists->entries);
2055 while (nd) {
2056 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2057
2058 nd = rb_next(nd);
2059 rb_erase(&h->rb_node, &hists->entries);
2060
2061 resort_filtered_entry(&new_root, h);
2062 }
2063
2064 hists->entries = new_root;
2065 }
2066
2067 void hists__filter_by_thread(struct hists *hists)
2068 {
2069 if (symbol_conf.report_hierarchy)
2070 hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2071 hists->thread_filter);
2072 else
2073 hists__filter_by_type(hists, HIST_FILTER__THREAD,
2074 hists__filter_entry_by_thread);
2075 }
2076
2077 void hists__filter_by_dso(struct hists *hists)
2078 {
2079 if (symbol_conf.report_hierarchy)
2080 hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2081 hists->dso_filter);
2082 else
2083 hists__filter_by_type(hists, HIST_FILTER__DSO,
2084 hists__filter_entry_by_dso);
2085 }
2086
2087 void hists__filter_by_symbol(struct hists *hists)
2088 {
2089 if (symbol_conf.report_hierarchy)
2090 hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2091 hists->symbol_filter_str);
2092 else
2093 hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2094 hists__filter_entry_by_symbol);
2095 }
2096
2097 void hists__filter_by_socket(struct hists *hists)
2098 {
2099 if (symbol_conf.report_hierarchy)
2100 hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2101 &hists->socket_filter);
2102 else
2103 hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2104 hists__filter_entry_by_socket);
2105 }
2106
2107 void events_stats__inc(struct events_stats *stats, u32 type)
2108 {
2109 ++stats->nr_events[0];
2110 ++stats->nr_events[type];
2111 }
2112
2113 void hists__inc_nr_events(struct hists *hists, u32 type)
2114 {
2115 events_stats__inc(&hists->stats, type);
2116 }
2117
2118 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2119 {
2120 events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
2121 if (!filtered)
2122 hists->stats.nr_non_filtered_samples++;
2123 }
2124
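/*
 * Dummy entries carry zeroed stats and are used when pairing two hists
 * (e.g. hists__match()/hists__link() for 'perf diff') so that an entry
 * present only on one side still gets a row on the other.
 */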
2125 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2126 struct hist_entry *pair)
2127 {
2128 struct rb_root *root;
2129 struct rb_node **p;
2130 struct rb_node *parent = NULL;
2131 struct hist_entry *he;
2132 int64_t cmp;
2133
2134 if (hists__has(hists, need_collapse))
2135 root = &hists->entries_collapsed;
2136 else
2137 root = hists->entries_in;
2138
2139 p = &root->rb_node;
2140
2141 while (*p != NULL) {
2142 parent = *p;
2143 he = rb_entry(parent, struct hist_entry, rb_node_in);
2144
2145 cmp = hist_entry__collapse(he, pair);
2146
2147 if (!cmp)
2148 goto out;
2149
2150 if (cmp < 0)
2151 p = &(*p)->rb_left;
2152 else
2153 p = &(*p)->rb_right;
2154 }
2155
2156 he = hist_entry__new(pair, true);
2157 if (he) {
2158 memset(&he->stat, 0, sizeof(he->stat));
2159 he->hists = hists;
2160 if (symbol_conf.cumulate_callchain)
2161 memset(he->stat_acc, 0, sizeof(he->stat));
2162 rb_link_node(&he->rb_node_in, parent, p);
2163 rb_insert_color(&he->rb_node_in, root);
2164 hists__inc_stats(hists, he);
2165 he->dummy = true;
2166 }
2167 out:
2168 return he;
2169 }
2170
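/*
 * Hierarchy entries are compared with the fmt->collapse() callbacks of
 * their own hpp_list (one sort key per level) instead of the flat
 * hist_entry__collapse() used above.
 */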
2171 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2172 struct rb_root *root,
2173 struct hist_entry *pair)
2174 {
2175 struct rb_node **p;
2176 struct rb_node *parent = NULL;
2177 struct hist_entry *he;
2178 struct perf_hpp_fmt *fmt;
2179
2180 p = &root->rb_node;
2181 while (*p != NULL) {
2182 int64_t cmp = 0;
2183
2184 parent = *p;
2185 he = rb_entry(parent, struct hist_entry, rb_node_in);
2186
2187 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2188 cmp = fmt->collapse(fmt, he, pair);
2189 if (cmp)
2190 break;
2191 }
2192 if (!cmp)
2193 goto out;
2194
2195 if (cmp < 0)
2196 p = &parent->rb_left;
2197 else
2198 p = &parent->rb_right;
2199 }
2200
2201 he = hist_entry__new(pair, true);
2202 if (he) {
2203 rb_link_node(&he->rb_node_in, parent, p);
2204 rb_insert_color(&he->rb_node_in, root);
2205
2206 he->dummy = true;
2207 he->hists = hists;
2208 memset(&he->stat, 0, sizeof(he->stat));
2209 hists__inc_stats(hists, he);
2210 }
2211 out:
2212 return he;
2213 }
2214
2215 static struct hist_entry *hists__find_entry(struct hists *hists,
2216 struct hist_entry *he)
2217 {
2218 struct rb_node *n;
2219
2220 if (hists__has(hists, need_collapse))
2221 n = hists->entries_collapsed.rb_node;
2222 else
2223 n = hists->entries_in->rb_node;
2224
2225 while (n) {
2226 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2227 int64_t cmp = hist_entry__collapse(iter, he);
2228
2229 if (cmp < 0)
2230 n = n->rb_left;
2231 else if (cmp > 0)
2232 n = n->rb_right;
2233 else
2234 return iter;
2235 }
2236
2237 return NULL;
2238 }
2239
2240 static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
2241 struct hist_entry *he)
2242 {
2243 struct rb_node *n = root->rb_node;
2244
2245 while (n) {
2246 struct hist_entry *iter;
2247 struct perf_hpp_fmt *fmt;
2248 int64_t cmp = 0;
2249
2250 iter = rb_entry(n, struct hist_entry, rb_node_in);
2251 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2252 cmp = fmt->collapse(fmt, iter, he);
2253 if (cmp)
2254 break;
2255 }
2256
2257 if (cmp < 0)
2258 n = n->rb_left;
2259 else if (cmp > 0)
2260 n = n->rb_right;
2261 else
2262 return iter;
2263 }
2264
2265 return NULL;
2266 }
2267
2268 static void hists__match_hierarchy(struct rb_root *leader_root,
2269 struct rb_root *other_root)
2270 {
2271 struct rb_node *nd;
2272 struct hist_entry *pos, *pair;
2273
2274 for (nd = rb_first(leader_root); nd; nd = rb_next(nd)) {
2275 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2276 pair = hists__find_hierarchy_entry(other_root, pos);
2277
2278 if (pair) {
2279 hist_entry__add_pair(pair, pos);
2280 hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2281 }
2282 }
2283 }
2284
2285 /*
2286 * Look for pairs to link to the leader buckets (hist_entries):
2287 */
2288 void hists__match(struct hists *leader, struct hists *other)
2289 {
2290 struct rb_root *root;
2291 struct rb_node *nd;
2292 struct hist_entry *pos, *pair;
2293
2294 if (symbol_conf.report_hierarchy) {
2295 /* hierarchy report always collapses entries */
2296 return hists__match_hierarchy(&leader->entries_collapsed,
2297 &other->entries_collapsed);
2298 }
2299
2300 if (hists__has(leader, need_collapse))
2301 root = &leader->entries_collapsed;
2302 else
2303 root = leader->entries_in;
2304
2305 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
2306 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2307 pair = hists__find_entry(other, pos);
2308
2309 if (pair)
2310 hist_entry__add_pair(pair, pos);
2311 }
2312 }
2313
2314 static int hists__link_hierarchy(struct hists *leader_hists,
2315 struct hist_entry *parent,
2316 struct rb_root *leader_root,
2317 struct rb_root *other_root)
2318 {
2319 struct rb_node *nd;
2320 struct hist_entry *pos, *leader;
2321
2322 for (nd = rb_first(other_root); nd; nd = rb_next(nd)) {
2323 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2324
2325 if (hist_entry__has_pairs(pos)) {
2326 bool found = false;
2327
2328 list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
2329 if (leader->hists == leader_hists) {
2330 found = true;
2331 break;
2332 }
2333 }
2334 if (!found)
2335 return -1;
2336 } else {
2337 leader = add_dummy_hierarchy_entry(leader_hists,
2338 leader_root, pos);
2339 if (leader == NULL)
2340 return -1;
2341
2342 /* the parent must be the leader-side entry, not taken from pos */
2343 leader->parent_he = parent;
2344
2345 hist_entry__add_pair(pos, leader);
2346 }
2347
2348 if (!pos->leaf) {
2349 if (hists__link_hierarchy(leader_hists, leader,
2350 &leader->hroot_in,
2351 &pos->hroot_in) < 0)
2352 return -1;
2353 }
2354 }
2355 return 0;
2356 }
2357
2358 /*
2359 * Look for entries in the other hists that are not present in the leader;
2360 * if we find any, just add a dummy entry on the leader hists, with period=0
2361 * and nr_events=0, to serve as the list header.
2362 */
2363 int hists__link(struct hists *leader, struct hists *other)
2364 {
2365 struct rb_root *root;
2366 struct rb_node *nd;
2367 struct hist_entry *pos, *pair;
2368
2369 if (symbol_conf.report_hierarchy) {
2370 /* hierarchy report always collapses entries */
2371 return hists__link_hierarchy(leader, NULL,
2372 &leader->entries_collapsed,
2373 &other->entries_collapsed);
2374 }
2375
2376 if (hists__has(other, need_collapse))
2377 root = &other->entries_collapsed;
2378 else
2379 root = other->entries_in;
2380
2381 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
2382 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2383
2384 if (!hist_entry__has_pairs(pos)) {
2385 pair = hists__add_dummy_entry(leader, pos);
2386 if (pair == NULL)
2387 return -1;
2388 hist_entry__add_pair(pos, pair);
2389 }
2390 }
2391
2392 return 0;
2393 }
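
/*
 * Note: a diff-style consumer typically calls hists__match() first to pair
 * the entries both hists have in common, then hists__link() to add
 * placeholders for the leftovers, roughly:
 *
 *	hists__match(leader, other);
 *	if (hists__link(leader, other) < 0)
 *		return -1;
 */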
2394
2395 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2396 struct perf_sample *sample, bool nonany_branch_mode)
2397 {
2398 struct branch_info *bi;
2399
2400 /* If we have branch cycles, always annotate them. */
2401 if (bs && bs->nr && bs->entries[0].flags.cycles) {
2402 int i;
2403
2404 bi = sample__resolve_bstack(sample, al);
2405 if (bi) {
2406 struct addr_map_symbol *prev = NULL;
2407
2408 /*
2409 * Ignore errors; we still want to process the
2410 * other entries.
2411 *
2412 * For non-standard branch modes, always
2413 * force no IPC (prev == NULL).
2414 *
2415 * Note that perf stores branches reversed from
2416 * program order!
2417 */
2418 for (i = bs->nr - 1; i >= 0; i--) {
2419 addr_map_symbol__account_cycles(&bi[i].from,
2420 nonany_branch_mode ? NULL : prev,
2421 bi[i].flags.cycles);
2422 prev = &bi[i].to;
2423 }
2424 free(bi);
2425 }
2426 }
2427 }
2428
2429 size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
2430 {
2431 struct perf_evsel *pos;
2432 size_t ret = 0;
2433
2434 evlist__for_each_entry(evlist, pos) {
2435 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
2436 ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
2437 }
2438
2439 return ret;
2440 }
2441
2442
2443 u64 hists__total_period(struct hists *hists)
2444 {
2445 return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2446 hists->stats.total_period;
2447 }
2448
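/*
 * Backs the "--percentage {relative|absolute}" option and the
 * hist.percentage config key (see perf_hist_config() below): with
 * "relative", percentages are computed against total_non_filtered_period,
 * i.e. only what remains after filtering; with "absolute", against the
 * full total_period (see hists__total_period() above).
 */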
2449 int parse_filter_percentage(const struct option *opt __maybe_unused,
2450 const char *arg, int unset __maybe_unused)
2451 {
2452 if (!strcmp(arg, "relative"))
2453 symbol_conf.filter_relative = true;
2454 else if (!strcmp(arg, "absolute"))
2455 symbol_conf.filter_relative = false;
2456 else {
2457 pr_debug("Invalud percentage: %s\n", arg);
2458 return -1;
2459 }
2460
2461 return 0;
2462 }
2463
2464 int perf_hist_config(const char *var, const char *value)
2465 {
2466 if (!strcmp(var, "hist.percentage"))
2467 return parse_filter_percentage(NULL, value, 0);
2468
2469 return 0;
2470 }
2471
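/*
 * entries_in_array[] holds two input rbtrees: entries_in points at the one
 * currently receiving new entries while the other can be drained by the
 * collapse/resort path, presumably so that insertion and resorting do not
 * have to serialize on the same tree (see the hists->lock mutex below).
 */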
2472 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2473 {
2474 memset(hists, 0, sizeof(*hists));
2475 hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
2476 hists->entries_in = &hists->entries_in_array[0];
2477 hists->entries_collapsed = RB_ROOT;
2478 hists->entries = RB_ROOT;
2479 pthread_mutex_init(&hists->lock, NULL);
2480 hists->socket_filter = -1;
2481 hists->hpp_list = hpp_list;
2482 INIT_LIST_HEAD(&hists->hpp_formats);
2483 return 0;
2484 }
2485
2486 static void hists__delete_remaining_entries(struct rb_root *root)
2487 {
2488 struct rb_node *node;
2489 struct hist_entry *he;
2490
2491 while (!RB_EMPTY_ROOT(root)) {
2492 node = rb_first(root);
2493 rb_erase(node, root);
2494
2495 he = rb_entry(node, struct hist_entry, rb_node_in);
2496 hist_entry__delete(he);
2497 }
2498 }
2499
2500 static void hists__delete_all_entries(struct hists *hists)
2501 {
2502 hists__delete_entries(hists);
2503 hists__delete_remaining_entries(&hists->entries_in_array[0]);
2504 hists__delete_remaining_entries(&hists->entries_in_array[1]);
2505 hists__delete_remaining_entries(&hists->entries_collapsed);
2506 }
2507
2508 static void hists_evsel__exit(struct perf_evsel *evsel)
2509 {
2510 struct hists *hists = evsel__hists(evsel);
2511 struct perf_hpp_fmt *fmt, *pos;
2512 struct perf_hpp_list_node *node, *tmp;
2513
2514 hists__delete_all_entries(hists);
2515
2516 list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2517 perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2518 list_del(&fmt->list);
2519 free(fmt);
2520 }
2521 list_del(&node->list);
2522 free(node);
2523 }
2524 }
2525
2526 static int hists_evsel__init(struct perf_evsel *evsel)
2527 {
2528 struct hists *hists = evsel__hists(evsel);
2529
2530 __hists__init(hists, &perf_hpp_list);
2531 return 0;
2532 }
2533
2534 /*
2535 * hists_evsel__exit() above frees the hist_entries stored in the rbtrees;
2536 * it is registered as the per-evsel destructor in hists__init() below.
2537 */
2538
2539 int hists__init(void)
2540 {
2541 int err = perf_evsel__object_config(sizeof(struct hists_evsel),
2542 hists_evsel__init,
2543 hists_evsel__exit);
2544 if (err)
2545 fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
2546
2547 return err;
2548 }
2549
2550 void perf_hpp_list__init(struct perf_hpp_list *list)
2551 {
2552 INIT_LIST_HEAD(&list->fields);
2553 INIT_LIST_HEAD(&list->sorts);
2554 }