/* tools/perf/util/hist.c */

#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "session.h"
#include "namespaces.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "thread.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <sys/param.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}
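
/*
 * Usage sketch (hypothetical widths): hists__new_col_len() only ever
 * grows a column, so repeated calls keep the maximum width seen so far:
 *
 *	hists__new_col_len(hists, HISTC_COMM, 10);	// true, width -> 10
 *	hists__new_col_len(hists, HISTC_COMM, 6);	// false, width stays 10
 */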

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose > 0)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 8);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        if (verbose > 0)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        if (verbose > 0)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }

                if (h->branch_info->srcline_from)
                        hists__new_col_len(hists, HISTC_SRCLINE_FROM,
                                           strlen(h->branch_info->srcline_from));
                if (h->branch_info->srcline_to)
                        hists__new_col_len(hists, HISTC_SRCLINE_TO,
                                           strlen(h->branch_info->srcline_to));
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen + 1);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen);
                }

                if (h->mem_info->iaddr.sym) {
                        symlen = (int)h->mem_info->iaddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                }

                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
        hists__new_col_len(hists, HISTC_CPU, 3);
        hists__new_col_len(hists, HISTC_SOCKET, 6);
        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

        if (h->srcline) {
                len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
                hists__new_col_len(hists, HISTC_SRCLINE, len);
        }

        if (h->srcfile)
                hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());

        if (h->trace_output)
                hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}
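
/*
 * Worked example of the width arithmetic above (illustrative numbers):
 * on a 64-bit build BITS_PER_LONG / 4 == 16, so a resolved 16-char
 * symbol printed in verbose mode needs 16 + 4 ('[x] ') + 16 (address)
 * + 2 ('0x') + 3 (' y ') = 41 columns.
 */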

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                                        unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he_stat->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he_stat->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he_stat->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he_stat->period_guest_us += period;
                break;
        default:
                break;
        }
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period += period;
        he_stat->weight += weight;
        he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period += src->period;
        dest->period_sys += src->period_sys;
        dest->period_us += src->period_us;
        dest->period_guest_sys += src->period_guest_sys;
        dest->period_guest_us += src->period_guest_us;
        dest->nr_events += src->nr_events;
        dest->weight += src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
        he_stat->period = (he_stat->period * 7) / 8;
        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}
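
/*
 * Illustration of the 7/8 decay: an entry with period 1024 drops to
 * 896 after one pass and 784 after two.  An entry that stops receiving
 * samples eventually decays to 0 and is reaped via
 * hists__decay_entries() below, which is how 'perf top' ages out
 * stale entries.
 */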

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;
        u64 diff;

        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);
        if (symbol_conf.cumulate_callchain)
                he_stat__decay(he->stat_acc);
        decay_callchain(he->callchain);

        diff = prev_period - he->stat.period;

        if (!he->depth) {
                hists->stats.total_period -= diff;
                if (!he->filtered)
                        hists->stats.total_non_filtered_period -= diff;
        }

        if (!he->leaf) {
                struct hist_entry *child;
                struct rb_node *node = rb_first(&he->hroot_out);
                while (node) {
                        child = rb_entry(node, struct hist_entry, rb_node);
                        node = rb_next(node);

                        if (hists__decay_entry(hists, child))
                                hists__delete_entry(hists, child);
                }
        }

        return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
        struct rb_root *root_in;
        struct rb_root *root_out;

        if (he->parent_he) {
                root_in  = &he->parent_he->hroot_in;
                root_out = &he->parent_he->hroot_out;
        } else {
                if (hists__has(hists, need_collapse))
                        root_in = &hists->entries_collapsed;
                else
                        root_in = hists->entries_in;
                root_out = &hists->entries;
        }

        rb_erase(&he->rb_node_in, root_in);
        rb_erase(&he->rb_node, root_out);

        --hists->nr_entries;
        if (!he->filtered)
                --hists->nr_non_filtered_entries;

        hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n))) {
                        hists__delete_entry(hists, n);
                }
        }
}

void hists__delete_entries(struct hists *hists)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                hists__delete_entry(hists, n);
        }
}

/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
                            struct hist_entry *template,
                            bool sample_self)
{
        *he = *template;

        if (symbol_conf.cumulate_callchain) {
                he->stat_acc = malloc(sizeof(he->stat));
                if (he->stat_acc == NULL)
                        return -ENOMEM;
                memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
                if (!sample_self)
                        memset(&he->stat, 0, sizeof(he->stat));
        }

        map__get(he->ms.map);

        if (he->branch_info) {
                /*
                 * This branch info is (part of) an allocation from
                 * sample__resolve_bstack() and will be freed after
                 * adding new entries, so we need to save a copy.
                 */
                he->branch_info = malloc(sizeof(*he->branch_info));
                if (he->branch_info == NULL) {
                        map__zput(he->ms.map);
                        free(he->stat_acc);
                        return -ENOMEM;
                }

                memcpy(he->branch_info, template->branch_info,
                       sizeof(*he->branch_info));

                map__get(he->branch_info->from.map);
                map__get(he->branch_info->to.map);
        }

        if (he->mem_info) {
                map__get(he->mem_info->iaddr.map);
                map__get(he->mem_info->daddr.map);
        }

        if (symbol_conf.use_callchain)
                callchain_init(he->callchain);

        if (he->raw_data) {
                he->raw_data = memdup(he->raw_data, he->raw_size);

                if (he->raw_data == NULL) {
                        map__put(he->ms.map);
                        if (he->branch_info) {
                                map__put(he->branch_info->from.map);
                                map__put(he->branch_info->to.map);
                                free(he->branch_info);
                        }
                        if (he->mem_info) {
                                map__put(he->mem_info->iaddr.map);
                                map__put(he->mem_info->daddr.map);
                        }
                        free(he->stat_acc);
                        return -ENOMEM;
                }
        }
        INIT_LIST_HEAD(&he->pairs.node);
        thread__get(he->thread);
        he->hroot_in  = RB_ROOT;
        he->hroot_out = RB_ROOT;

        if (!symbol_conf.report_hierarchy)
                he->leaf = true;

        return 0;
}

static void *hist_entry__zalloc(size_t size)
{
        return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
        free(ptr);
}

static struct hist_entry_ops default_ops = {
        .new  = hist_entry__zalloc,
        .free = hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
                                          bool sample_self)
{
        struct hist_entry_ops *ops = template->ops;
        size_t callchain_size = 0;
        struct hist_entry *he;
        int err = 0;

        if (!ops)
                ops = template->ops = &default_ops;

        if (symbol_conf.use_callchain)
                callchain_size = sizeof(struct callchain_root);

        he = ops->new(callchain_size);
        if (he) {
                err = hist_entry__init(he, template, sample_self);
                if (err) {
                        ops->free(he);
                        he = NULL;
                }
        }

        return he;
}
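
/*
 * Callers may override the allocator with their own hist_entry_ops;
 * 'perf c2c' uses this hook to embed entries in a larger structure.
 * A minimal hypothetical sketch that just counts live entries (note
 * that .new receives the callchain size and must also reserve room
 * for the hist_entry itself, exactly like hist_entry__zalloc()):
 *
 *	static unsigned int nr_live_entries;	// hypothetical statistic
 *
 *	static void *my_entry_new(size_t callchain_size)
 *	{
 *		nr_live_entries++;
 *		return zalloc(callchain_size + sizeof(struct hist_entry));
 *	}
 *
 *	static void my_entry_free(void *ptr)
 *	{
 *		nr_live_entries--;
 *		free(ptr);
 *	}
 *
 *	static struct hist_entry_ops my_ops = {
 *		.new	= my_entry_new,
 *		.free	= my_entry_free,
 *	};
 *
 * The ops are then handed to hists__add_entry_ops() below.
 */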

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
        if (!symbol_conf.use_callchain)
                return;

        he->hists->callchain_period += period;
        if (!he->filtered)
                he->hists->callchain_non_filtered_period += period;
}

static struct hist_entry *hists__findnew_entry(struct hists *hists,
                                               struct hist_entry *entry,
                                               struct addr_location *al,
                                               bool sample_self)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        u64 weight = entry->stat.weight;

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        if (sample_self) {
                                he_stat__add_period(&he->stat, period, weight);
                                hist_entry__add_callchain_period(he, period);
                        }
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_period(he->stat_acc, period, weight);

                        /*
                         * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
                        zfree(&entry->mem_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                map__put(he->ms.map);
                                he->ms.map = map__get(entry->ms.map);
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry, sample_self);
        if (!he)
                return NULL;

        if (sample_self)
                hist_entry__add_callchain_period(he, period);
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        if (sample_self)
                he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        if (symbol_conf.cumulate_callchain)
                he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
        return he;
}

static struct hist_entry*
__hists__add_entry(struct hists *hists,
                   struct addr_location *al,
                   struct symbol *sym_parent,
                   struct branch_info *bi,
                   struct mem_info *mi,
                   struct perf_sample *sample,
                   bool sample_self,
                   struct hist_entry_ops *ops)
{
        struct namespaces *ns = thread__namespaces(al->thread);
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .cgroup_id = {
                        .dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
                        .ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
                },
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .socket  = al->socket,
                .cpu     = al->cpu,
                .cpumode = al->cpumode,
                .ip      = al->addr,
                .level   = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = sample->period,
                        .weight = sample->weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists = hists,
                .branch_info = bi,
                .mem_info = mi,
                .transaction = sample->transaction,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
                .ops = ops,
        };

        return hists__findnew_entry(hists, &entry, al, sample_self);
}

struct hist_entry *hists__add_entry(struct hists *hists,
                                    struct addr_location *al,
                                    struct symbol *sym_parent,
                                    struct branch_info *bi,
                                    struct mem_info *mi,
                                    struct perf_sample *sample,
                                    bool sample_self)
{
        return __hists__add_entry(hists, al, sym_parent, bi, mi,
                                  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
                                        struct hist_entry_ops *ops,
                                        struct addr_location *al,
                                        struct symbol *sym_parent,
                                        struct branch_info *bi,
                                        struct mem_info *mi,
                                        struct perf_sample *sample,
                                        bool sample_self)
{
        return __hists__add_entry(hists, al, sym_parent, bi, mi,
                                  sample, sample_self, ops);
}
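
/*
 * A minimal call-site sketch (assumed context: 'al' was filled in by
 * the usual sample resolution path for a PERF_RECORD_SAMPLE and
 * 'sample' is the parsed sample; no branch or mem info here):
 *
 *	struct hist_entry *he;
 *
 *	he = hists__add_entry(hists, &al, NULL, NULL, NULL,
 *			      sample, true);
 *	if (he == NULL)
 *		return -ENOMEM;
 *
 * hists__add_entry_ops() is the same call but lets the caller supply
 * the hist_entry_ops allocator shown earlier.
 */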

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                    struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                        struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_sample *sample = iter->sample;
        struct mem_info *mi;

        mi = sample__resolve_mem(sample, al);
        if (mi == NULL)
                return -ENOMEM;

        iter->priv = mi;
        return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        u64 cost;
        struct mem_info *mi = iter->priv;
        struct hists *hists = evsel__hists(iter->evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        if (mi == NULL)
                return -EINVAL;

        cost = sample->weight;
        if (!cost)
                cost = 1;

        /*
         * We must pass period=weight in order to get the correct
         * sorting from hists__collapse_resort(), which is solely
         * based on periods.  We want sorting to be done on
         * nr_events * weight, and this is indirectly achieved by
         * passing period=weight here and via the
         * he_stat__add_period() function.
         */
        sample->period = cost;

        he = hists__add_entry(hists, al, iter->parent, NULL, mi,
                              sample, true);
        if (!he)
                return -ENOMEM;

        iter->he = he;
        return 0;
}
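
/*
 * Worked example of the period=weight trick (illustrative numbers):
 * two mem samples hitting the same entry with weights 30 and 50 are
 * accumulated by he_stat__add_period() into period = 80 and
 * nr_events = 2, so the later period-based sort orders entries by
 * total cost rather than by raw sample count.
 */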

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
                      struct addr_location *al __maybe_unused)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he = iter->he;
        int err = -EINVAL;

        if (he == NULL)
                goto out;

        hists__inc_nr_samples(hists, he->filtered);

        err = hist_entry__append_callchain(he, iter->sample);

out:
        /*
         * We don't need to free iter->priv (mem_info) here since the mem info
         * was either already freed in hists__findnew_entry() or passed to a
         * new hist entry by hist_entry__new().
         */
        iter->priv = NULL;

        iter->he = NULL;
        return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_sample *sample = iter->sample;

        bi = sample__resolve_bstack(sample, al);
        if (!bi)
                return -ENOMEM;

        iter->curr = 0;
        iter->total = sample->branch_stack->nr;

        iter->priv = bi;
        return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        /* to avoid calling callback function */
        iter->he = NULL;

        return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi = iter->priv;
        int i = iter->curr;

        if (bi == NULL)
                return 0;

        if (iter->curr >= iter->total)
                return 0;

        al->map = bi[i].to.map;
        al->sym = bi[i].to.sym;
        al->addr = bi[i].to.addr;
        return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he = NULL;
        int i = iter->curr;
        int err = 0;

        bi = iter->priv;

        if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
                goto out;

        /*
         * The report shows the percentage of total branches captured
         * and not events sampled.  Thus we use a pseudo period of 1.
         */
        sample->period = 1;
        sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

        he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        hists__inc_nr_samples(hists, he->filtered);

out:
        iter->he = he;
        iter->curr++;
        return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
                          struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        struct hist_entry *he = iter->he;
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;

        if (he == NULL)
                return 0;

        iter->he = NULL;

        hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

        return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
                              struct addr_location *al __maybe_unused)
{
        struct hist_entry **he_cache;

        callchain_cursor_commit(&callchain_cursor);

        /*
         * This is for detecting cycles or recursion so that they're
         * cumulated only once, preventing entries from exceeding
         * 100% overhead.
         */
        he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
        if (he_cache == NULL)
                return -ENOMEM;

        iter->priv = he_cache;
        iter->curr = 0;

        return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
                                 struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        int err = 0;

        he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        hist_entry__append_callchain(he, sample);

        /*
         * We need to re-initialize the cursor since callchain_append()
         * advanced the cursor to the end.
         */
        callchain_cursor_commit(&callchain_cursor);

        hists__inc_nr_samples(hists, he->filtered);

        return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
                           struct addr_location *al)
{
        struct callchain_cursor_node *node;

        node = callchain_cursor_current(&callchain_cursor);
        if (node == NULL)
                return 0;

        return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
                               struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        struct hist_entry he_tmp = {
                .hists = evsel__hists(evsel),
                .cpu = al->cpu,
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ip = al->addr,
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .parent = iter->parent,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
        };
        int i;
        struct callchain_cursor cursor;

        callchain_cursor_snapshot(&cursor, &callchain_cursor);

        callchain_cursor_advance(&callchain_cursor);

        /*
         * Check if there are duplicate entries in the callchain.
         * It's possible that it has cycles or recursive calls.
         */
        for (i = 0; i < iter->curr; i++) {
                if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
                        /* to avoid calling callback function */
                        iter->he = NULL;
                        return 0;
                }
        }

        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                              sample, false);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        if (symbol_conf.use_callchain)
                callchain_append(he->callchain, &cursor, sample->period);
        return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return 0;
}

const struct hist_iter_ops hist_iter_mem = {
        .prepare_entry = iter_prepare_mem_entry,
        .add_single_entry = iter_add_single_mem_entry,
        .next_entry = iter_next_nop_entry,
        .add_next_entry = iter_add_next_nop_entry,
        .finish_entry = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
        .prepare_entry = iter_prepare_branch_entry,
        .add_single_entry = iter_add_single_branch_entry,
        .next_entry = iter_next_branch_entry,
        .add_next_entry = iter_add_next_branch_entry,
        .finish_entry = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
        .prepare_entry = iter_prepare_normal_entry,
        .add_single_entry = iter_add_single_normal_entry,
        .next_entry = iter_next_nop_entry,
        .add_next_entry = iter_add_next_nop_entry,
        .finish_entry = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
        .prepare_entry = iter_prepare_cumulative_entry,
        .add_single_entry = iter_add_single_cumulative_entry,
        .next_entry = iter_next_cumulative_entry,
        .add_next_entry = iter_add_next_cumulative_entry,
        .finish_entry = iter_finish_cumulative_entry,
};
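
/*
 * A tool picks one of the ops tables above and drives it through
 * hist_entry_iter__add().  A minimal sketch in the style of
 * 'perf report' (assumed context: 'evsel', 'sample' and 'al' already
 * resolved for the current event, 'max_stack' is the caller's stack
 * depth limit):
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= &hist_iter_normal,
 *	};
 *
 *	if (sample->branch_stack)
 *		iter.ops = &hist_iter_branch;
 *	else if (symbol_conf.cumulate_callchain)
 *		iter.ops = &hist_iter_cumulative;
 *
 *	err = hist_entry_iter__add(&iter, &al, max_stack, NULL);
 */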

int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
                         int max_stack_depth, void *arg)
{
        int err, err2;
        struct map *alm = NULL;

        if (al && al->map)
                alm = map__get(al->map);

        err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
                                        iter->evsel, al, max_stack_depth);
        if (err)
                return err;

        iter->max_stack = max_stack_depth;

        err = iter->ops->prepare_entry(iter, al);
        if (err)
                goto out;

        err = iter->ops->add_single_entry(iter, al);
        if (err)
                goto out;

        if (iter->he && iter->add_entry_cb) {
                err = iter->add_entry_cb(iter, al, true, arg);
                if (err)
                        goto out;
        }

        while (iter->ops->next_entry(iter, al)) {
                err = iter->ops->add_next_entry(iter, al);
                if (err)
                        break;

                if (iter->he && iter->add_entry_cb) {
                        err = iter->add_entry_cb(iter, al, false, arg);
                        if (err)
                                goto out;
                }
        }

out:
        err2 = iter->ops->finish_entry(iter, al);
        if (!err)
                err = err2;

        map__put(alm);

        return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    !perf_hpp__defined_dynamic_entry(fmt, hists))
                        continue;

                cmp = fmt->cmp(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    !perf_hpp__defined_dynamic_entry(fmt, hists))
                        continue;

                cmp = fmt->collapse(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}
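
/*
 * Note on the three comparators used in this file: fmt->cmp (via
 * hist_entry__cmp()) orders entries on insertion into entries_in,
 * fmt->collapse (via hist_entry__collapse()) decides which entries
 * merge during hists__collapse_resort(), and fmt->sort (via
 * hist_entry__sort() further below) orders the final output tree.
 */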

void hist_entry__delete(struct hist_entry *he)
{
        struct hist_entry_ops *ops = he->ops;

        thread__zput(he->thread);
        map__zput(he->ms.map);

        if (he->branch_info) {
                map__zput(he->branch_info->from.map);
                map__zput(he->branch_info->to.map);
                free_srcline(he->branch_info->srcline_from);
                free_srcline(he->branch_info->srcline_to);
                zfree(&he->branch_info);
        }

        if (he->mem_info) {
                map__zput(he->mem_info->iaddr.map);
                map__zput(he->mem_info->daddr.map);
                zfree(&he->mem_info);
        }

        if (he->inline_node) {
                inline_node__delete(he->inline_node);
                he->inline_node = NULL;
        }

        zfree(&he->stat_acc);
        free_srcline(he->srcline);
        if (he->srcfile && he->srcfile[0])
                free(he->srcfile);
        free_callchain(he->callchain);
        free(he->trace_output);
        free(he->raw_data);
        ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less':
 * it would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
                                   struct perf_hpp_fmt *fmt, int printed)
{
        if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
                const int width = fmt->width(fmt, hpp, he->hists);
                if (printed < width) {
                        advance_hpp(hpp, printed);
                        printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
                }
        }

        return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
                                       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
        return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
                                                enum hist_filter type,
                                                fmt_chk_fn check)
{
        struct perf_hpp_fmt *fmt;
        bool type_match = false;
        struct hist_entry *parent = he->parent_he;

        switch (type) {
        case HIST_FILTER__THREAD:
                if (symbol_conf.comm_list == NULL &&
                    symbol_conf.pid_list == NULL &&
                    symbol_conf.tid_list == NULL)
                        return;
                break;
        case HIST_FILTER__DSO:
                if (symbol_conf.dso_list == NULL)
                        return;
                break;
        case HIST_FILTER__SYMBOL:
                if (symbol_conf.sym_list == NULL)
                        return;
                break;
        case HIST_FILTER__PARENT:
        case HIST_FILTER__GUEST:
        case HIST_FILTER__HOST:
        case HIST_FILTER__SOCKET:
        case HIST_FILTER__C2C:
        default:
                return;
        }

        /* if it's filtered by its own fmt, it has to have filter bits */
        perf_hpp_list__for_each_format(he->hpp_list, fmt) {
                if (check(fmt)) {
                        type_match = true;
                        break;
                }
        }

        if (type_match) {
                /*
                 * If the filter is for the current level entry, propagate
                 * the filter marker to parents.  The marker bit was
                 * already set by default, so it only needs to be cleared
                 * for non-filtered entries.
                 */
                if (!(he->filtered & (1 << type))) {
                        while (parent) {
                                parent->filtered &= ~(1 << type);
                                parent = parent->parent_he;
                        }
                }
        } else {
                /*
                 * If the current entry doesn't have matching formats, set
                 * the filter marker for upper level entries.  It will be
                 * cleared if any of its lower level entries is not filtered.
                 *
                 * For lower-level entries, it inherits the parent's
                 * filter bit so that lower level entries of a
                 * non-filtered entry won't set the filter marker.
                 */
                if (parent == NULL)
                        he->filtered |= (1 << type);
                else
                        he->filtered |= (parent->filtered & (1 << type));
        }
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
        hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
                                            check_thread_entry);

        hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
                                            perf_hpp__is_dso_entry);

        hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
                                            perf_hpp__is_sym_entry);

        hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
                                                 struct rb_root *root,
                                                 struct hist_entry *he,
                                                 struct hist_entry *parent_he,
                                                 struct perf_hpp_list *hpp_list)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter, *new;
        struct perf_hpp_fmt *fmt;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = 0;
                perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
                        cmp = fmt->collapse(fmt, iter, he);
                        if (cmp)
                                break;
                }

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);
                        return iter;
                }

                if (cmp < 0)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }

        new = hist_entry__new(he, true);
        if (new == NULL)
                return NULL;

        hists->nr_entries++;

        /* save related format list for output */
        new->hpp_list = hpp_list;
        new->parent_he = parent_he;

        hist_entry__apply_hierarchy_filters(new);

        /* some fields are now passed to 'new' */
        perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
                if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
                        he->trace_output = NULL;
                else
                        new->trace_output = NULL;

                if (perf_hpp__is_srcline_entry(fmt))
                        he->srcline = NULL;
                else
                        new->srcline = NULL;

                if (perf_hpp__is_srcfile_entry(fmt))
                        he->srcfile = NULL;
                else
                        new->srcfile = NULL;
        }

        rb_link_node(&new->rb_node_in, parent, p);
        rb_insert_color(&new->rb_node_in, root);
        return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct perf_hpp_list_node *node;
        struct hist_entry *new_he = NULL;
        struct hist_entry *parent = NULL;
        int depth = 0;
        int ret = 0;

        list_for_each_entry(node, &hists->hpp_formats, list) {
                /* skip period (overhead) and elided columns */
                if (node->level == 0 || node->skip)
                        continue;

                /* insert copy of 'he' for each fmt into the hierarchy */
                new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
                if (new_he == NULL) {
                        ret = -1;
                        break;
                }

                root = &new_he->hroot_in;
                new_he->depth = depth++;
                parent = new_he;
        }

        if (new_he) {
                new_he->leaf = true;

                if (symbol_conf.use_callchain) {
                        callchain_cursor_reset(&callchain_cursor);
                        if (callchain_merge(&callchain_cursor,
                                            new_he->callchain,
                                            he->callchain) < 0)
                                ret = -1;
                }
        }

        /* 'he' is no longer used */
        hist_entry__delete(he);

        /* return 0 (or -1) since it already applied filters */
        return ret;
}

static int hists__collapse_insert_entry(struct hists *hists,
                                        struct rb_root *root,
                                        struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        if (symbol_conf.report_hierarchy)
                return hists__hierarchy_insert_entry(hists, root, he);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        int ret = 0;

                        he_stat__add_stat(&iter->stat, &he->stat);
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_stat(iter->stat_acc, he->stat_acc);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                if (callchain_merge(&callchain_cursor,
                                                    iter->callchain,
                                                    he->callchain) < 0)
                                        ret = -1;
                        }
                        hist_entry__delete(he);
                        return ret;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return 1;
}

struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}
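
/*
 * Illustration: entries_in_array[] holds two rb-trees used as a
 * double buffer.  The rotation above hands the filled tree to the
 * caller (hists__collapse_resort()) for draining while new samples
 * keep landing in the other slot via hists->entries_in, so a tool
 * like 'perf top' can keep inserting entries while a resort is in
 * flight, serialized only by hists->lock around the pointer swap.
 */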

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
        hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        int ret;

        if (!hists__has(hists, need_collapse))
                return 0;

        hists->nr_entries = 0;

        root = hists__get_rotate_entries_in(hists);

        next = rb_first(root);

        while (next) {
                if (session_done())
                        break;
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
                if (ret < 0)
                        return -1;

                if (ret) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
                if (prog)
                        ui_progress__update(prog, 1);
        }
        return 0;
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
        struct hists *hists = a->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__should_skip(fmt, a->hists))
                        continue;

                cmp = fmt->sort(fmt, a, b);
                if (cmp)
                        break;
        }

        return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
        hists->nr_non_filtered_entries = 0;
        hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
        hists->nr_entries = 0;
        hists->stats.total_period = 0;

        hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
        hists->nr_non_filtered_entries++;
        hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered)
                hists__inc_filter_stats(hists, h);

        hists->nr_entries++;
        hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
        struct rb_node *node;
        struct hist_entry *he;

        node = rb_first(&hists->entries);

        hists->stats.total_period = 0;
        hists->stats.total_non_filtered_period = 0;

        /*
         * Recalculate the total period using top-level entries only,
         * since lower level entries only see non-filtered entries
         * but upper level entries have the sum of both.
         */
        while (node) {
                he = rb_entry(node, struct hist_entry, rb_node);
                node = rb_next(node);

                hists->stats.total_period += he->stat.period;
                if (!he->filtered)
                        hists->stats.total_non_filtered_period += he->stat.period;
        }
}

static void hierarchy_insert_output_entry(struct rb_root *root,
                                          struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        struct perf_hpp_fmt *fmt;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort(he, iter) > 0)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, root);

        /* update column width of dynamic entry */
        perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt))
                        fmt->sort(fmt, he, NULL);
        }
}

static void hists__hierarchy_output_resort(struct hists *hists,
                                           struct ui_progress *prog,
                                           struct rb_root *root_in,
                                           struct rb_root *root_out,
                                           u64 min_callchain_hits,
                                           bool use_callchain)
{
        struct rb_node *node;
        struct hist_entry *he;

        *root_out = RB_ROOT;
        node = rb_first(root_in);

        while (node) {
                he = rb_entry(node, struct hist_entry, rb_node_in);
                node = rb_next(node);

                hierarchy_insert_output_entry(root_out, he);

                if (prog)
                        ui_progress__update(prog, 1);

                hists->nr_entries++;
                if (!he->filtered) {
                        hists->nr_non_filtered_entries++;
                        hists__calc_col_len(hists, he);
                }

                if (!he->leaf) {
                        hists__hierarchy_output_resort(hists, prog,
                                                       &he->hroot_in,
                                                       &he->hroot_out,
                                                       min_callchain_hits,
                                                       use_callchain);
                        continue;
                }

                if (!use_callchain)
                        continue;

                if (callchain_param.mode == CHAIN_GRAPH_REL) {
                        u64 total = he->stat.period;

                        if (symbol_conf.cumulate_callchain)
                                total = he->stat_acc->period;

                        min_callchain_hits = total * (callchain_param.min_percent / 100);
                }

                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);
        }
}

static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits,
                                         bool use_callchain)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        struct perf_hpp_fmt *fmt;

        if (use_callchain) {
                if (callchain_param.mode == CHAIN_GRAPH_REL) {
                        u64 total = he->stat.period;

                        if (symbol_conf.cumulate_callchain)
                                total = he->stat_acc->period;

                        min_callchain_hits = total * (callchain_param.min_percent / 100);
                }
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);
        }

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);

        perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    perf_hpp__defined_dynamic_entry(fmt, he->hists))
                        fmt->sort(fmt, he, NULL);  /* update column width */
        }
}

static void output_resort(struct hists *hists, struct ui_progress *prog,
                          bool use_callchain, hists__resort_cb_t cb)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 callchain_total;
        u64 min_callchain_hits;

        callchain_total = hists->callchain_period;
        if (symbol_conf.filter_relative)
                callchain_total = hists->callchain_non_filtered_period;

        min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

        hists__reset_stats(hists);
        hists__reset_col_len(hists);

        if (symbol_conf.report_hierarchy) {
                hists__hierarchy_output_resort(hists, prog,
                                               &hists->entries_collapsed,
                                               &hists->entries,
                                               min_callchain_hits,
                                               use_callchain);
                hierarchy_recalc_total_periods(hists);
                return;
        }

        if (hists__has(hists, need_collapse))
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                if (cb && cb(n))
                        continue;

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
                hists__inc_stats(hists, n);

                if (!n->filtered)
                        hists__calc_col_len(hists, n);

                if (prog)
                        ui_progress__update(prog, 1);
        }
}
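
/*
 * Worked example of the min_callchain_hits arithmetic (illustrative
 * numbers): with callchain_param.min_percent = 0.5 and a total
 * callchain period of 20000, min_callchain_hits = 20000 * 0.5 / 100
 * = 100, so callchain branches accounting for fewer than roughly 100
 * period units are pruned from the sorted chain.
 */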

void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
{
        bool use_callchain;

        if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
                use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
        else
                use_callchain = symbol_conf.use_callchain;

        output_resort(evsel__hists(evsel), prog, use_callchain, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
        output_resort(hists, prog, symbol_conf.use_callchain, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
                             hists__resort_cb_t cb)
{
        output_resort(hists, prog, symbol_conf.use_callchain, cb);
}

static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
        if (he->leaf || hmd == HMD_FORCE_SIBLING)
                return false;

        if (he->unfolded || hmd == HMD_FORCE_CHILD)
                return true;

        return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
        struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

        while (can_goto_child(he, HMD_NORMAL)) {
                node = rb_last(&he->hroot_out);
                he = rb_entry(node, struct hist_entry, rb_node);
        }
        return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
        struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

        if (can_goto_child(he, hmd))
                node = rb_first(&he->hroot_out);
        else
                node = rb_next(node);

        while (node == NULL) {
                he = he->parent_he;
                if (he == NULL)
                        break;

                node = rb_next(&he->rb_node);
        }
        return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
        struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

        node = rb_prev(node);
        if (node)
                return rb_hierarchy_last(node);

        he = he->parent_he;
        if (he == NULL)
                return NULL;

        return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
        struct rb_node *node;
        struct hist_entry *child;
        float percent;

        if (he->leaf)
                return false;

        node = rb_first(&he->hroot_out);
        child = rb_entry(node, struct hist_entry, rb_node);

        while (node && child->filtered) {
                node = rb_next(node);
                child = rb_entry(node, struct hist_entry, rb_node);
        }

        if (node)
                percent = hist_entry__get_percent_limit(child);
        else
                percent = 0;

        return node && percent >= limit;
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);

        if (symbol_conf.report_hierarchy) {
                struct hist_entry *parent = h->parent_he;

                while (parent) {
                        he_stat__add_stat(&parent->stat, &h->stat);

                        parent->filtered &= ~(1 << filter);

                        if (parent->filtered)
                                goto next;

                        /* force fold unfiltered entry for simplicity */
                        parent->unfolded = false;
                        parent->has_no_entry = false;
                        parent->row_offset = 0;
                        parent->nr_rows = 0;
next:
                        parent = parent->parent_he;
                }
        }

        if (h->filtered)
                return;

        /* force fold unfiltered entry for simplicity */
        h->unfolded = false;
        h->has_no_entry = false;
        h->row_offset = 0;
        h->nr_rows = 0;

        hists->stats.nr_non_filtered_samples += h->stat.nr_events;

        hists__inc_filter_stats(hists, h);
        hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

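/*
 * Note: a socket_filter of -1 means "no socket filter"; only
 * non-negative socket ids can ever match below.
 */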
static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he)
{
        if ((hists->socket_filter > -1) &&
            (he->socket != hists->socket_filter)) {
                he->filtered |= (1 << HIST_FILTER__SOCKET);
                return true;
        }

        return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (filter(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, type);
        }
}

static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        struct rb_root new_root = RB_ROOT;
        struct rb_node *nd;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, root);

        if (he->leaf || he->filtered)
                return;

        nd = rb_first(&he->hroot_out);
        while (nd) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                nd = rb_next(nd);
                rb_erase(&h->rb_node, &he->hroot_out);

                resort_filtered_entry(&new_root, h);
        }

        he->hroot_out = new_root;
}

static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
        struct rb_node *nd;
        struct rb_root new_root = RB_ROOT;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        nd = rb_first(&hists->entries);
        while (nd) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
                int ret;

                ret = hist_entry__filter(h, type, arg);

                /*
                 * case 1. non-matching type
                 * zero out the period, set filter marker and move to child
                 */
                if (ret < 0) {
                        memset(&h->stat, 0, sizeof(h->stat));
                        h->filtered |= (1 << type);

                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
                }
                /*
                 * case 2. matched type (filter out)
                 * set filter marker and move to next
                 */
                else if (ret == 1) {
                        h->filtered |= (1 << type);

                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
                }
                /*
                 * case 3. ok (not filtered)
                 * add period to hists and parents, erase the filter marker
                 * and move to next sibling
                 */
                else {
                        hists__remove_entry_filter(hists, h, type);

                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
                }
        }

        hierarchy_recalc_total_periods(hists);

        /*
         * Resort the output after applying a new filter, since a filter
         * in a lower hierarchy can change periods in an upper hierarchy.
         */
        nd = rb_first(&hists->entries);
        while (nd) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                nd = rb_next(nd);
                rb_erase(&h->rb_node, &hists->entries);

                resort_filtered_entry(&new_root, h);
        }

        hists->entries = new_root;
}

void hists__filter_by_thread(struct hists *hists)
{
        if (symbol_conf.report_hierarchy)
                hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
                                        hists->thread_filter);
        else
                hists__filter_by_type(hists, HIST_FILTER__THREAD,
                                      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
        if (symbol_conf.report_hierarchy)
                hists__filter_hierarchy(hists, HIST_FILTER__DSO,
                                        hists->dso_filter);
        else
                hists__filter_by_type(hists, HIST_FILTER__DSO,
                                      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
        if (symbol_conf.report_hierarchy)
                hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
                                        hists->symbol_filter_str);
        else
                hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
                                      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
        if (symbol_conf.report_hierarchy)
                hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
                                        &hists->socket_filter);
        else
                hists__filter_by_type(hists, HIST_FILTER__SOCKET,
                                      hists__filter_entry_by_socket);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}
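
/*
 * Note: nr_events[0] doubles as the grand total across all record
 * types, since valid PERF_RECORD_* type numbers start at 1.
 */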
2121
2122 void hists__inc_nr_events(struct hists *hists, u32 type)
2123 {
2124 events_stats__inc(&hists->stats, type);
2125 }
2126
2127 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2128 {
2129 events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
2130 if (!filtered)
2131 hists->stats.nr_non_filtered_samples++;
2132 }
2133
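/*
 * Insert a zero-period clone of 'pair' into 'hists' so both hists end
 * up with the same set of buckets; used by hists__link() below.
 */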
2134 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2135 struct hist_entry *pair)
2136 {
2137 struct rb_root *root;
2138 struct rb_node **p;
2139 struct rb_node *parent = NULL;
2140 struct hist_entry *he;
2141 int64_t cmp;
2142
2143 if (hists__has(hists, need_collapse))
2144 root = &hists->entries_collapsed;
2145 else
2146 root = hists->entries_in;
2147
2148 p = &root->rb_node;
2149
2150 while (*p != NULL) {
2151 parent = *p;
2152 he = rb_entry(parent, struct hist_entry, rb_node_in);
2153
2154 cmp = hist_entry__collapse(he, pair);
2155
2156 if (!cmp)
2157 goto out;
2158
2159 if (cmp < 0)
2160 p = &(*p)->rb_left;
2161 else
2162 p = &(*p)->rb_right;
2163 }
2164
2165 he = hist_entry__new(pair, true);
2166 if (he) {
2167 memset(&he->stat, 0, sizeof(he->stat));
2168 he->hists = hists;
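/* stat_acc points at a single struct he_stat, hence sizeof(he->stat) */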
2169 if (symbol_conf.cumulate_callchain)
2170 memset(he->stat_acc, 0, sizeof(he->stat));
2171 rb_link_node(&he->rb_node_in, parent, p);
2172 rb_insert_color(&he->rb_node_in, root);
2173 hists__inc_stats(hists, he);
2174 he->dummy = true;
2175 }
2176 out:
2177 return he;
2178 }
2179
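/*
 * Hierarchy-mode counterpart of hists__add_dummy_entry(): the lookup
 * key is the per-level sort list (hpp_list) rather than the flat
 * collapse order.
 */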
2180 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2181 struct rb_root *root,
2182 struct hist_entry *pair)
2183 {
2184 struct rb_node **p;
2185 struct rb_node *parent = NULL;
2186 struct hist_entry *he;
2187 struct perf_hpp_fmt *fmt;
2188
2189 p = &root->rb_node;
2190 while (*p != NULL) {
2191 int64_t cmp = 0;
2192
2193 parent = *p;
2194 he = rb_entry(parent, struct hist_entry, rb_node_in);
2195
2196 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2197 cmp = fmt->collapse(fmt, he, pair);
2198 if (cmp)
2199 break;
2200 }
2201 if (!cmp)
2202 goto out;
2203
2204 if (cmp < 0)
2205 p = &parent->rb_left;
2206 else
2207 p = &parent->rb_right;
2208 }
2209
2210 he = hist_entry__new(pair, true);
2211 if (he) {
2212 rb_link_node(&he->rb_node_in, parent, p);
2213 rb_insert_color(&he->rb_node_in, root);
2214
2215 he->dummy = true;
2216 he->hists = hists;
2217 memset(&he->stat, 0, sizeof(he->stat));
2218 hists__inc_stats(hists, he);
2219 }
2220 out:
2221 return he;
2222 }
2223
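/*
 * Binary search for an entry equivalent to 'he', using the same
 * collapse comparator that ordered the tree.
 */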
2224 static struct hist_entry *hists__find_entry(struct hists *hists,
2225 struct hist_entry *he)
2226 {
2227 struct rb_node *n;
2228
2229 if (hists__has(hists, need_collapse))
2230 n = hists->entries_collapsed.rb_node;
2231 else
2232 n = hists->entries_in->rb_node;
2233
2234 while (n) {
2235 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2236 int64_t cmp = hist_entry__collapse(iter, he);
2237
2238 if (cmp < 0)
2239 n = n->rb_left;
2240 else if (cmp > 0)
2241 n = n->rb_right;
2242 else
2243 return iter;
2244 }
2245
2246 return NULL;
2247 }
2248
2249 static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
2250 struct hist_entry *he)
2251 {
2252 struct rb_node *n = root->rb_node;
2253
2254 while (n) {
2255 struct hist_entry *iter;
2256 struct perf_hpp_fmt *fmt;
2257 int64_t cmp = 0;
2258
2259 iter = rb_entry(n, struct hist_entry, rb_node_in);
2260 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2261 cmp = fmt->collapse(fmt, iter, he);
2262 if (cmp)
2263 break;
2264 }
2265
2266 if (cmp < 0)
2267 n = n->rb_left;
2268 else if (cmp > 0)
2269 n = n->rb_right;
2270 else
2271 return iter;
2272 }
2273
2274 return NULL;
2275 }
2276
2277 static void hists__match_hierarchy(struct rb_root *leader_root,
2278 struct rb_root *other_root)
2279 {
2280 struct rb_node *nd;
2281 struct hist_entry *pos, *pair;
2282
2283 for (nd = rb_first(leader_root); nd; nd = rb_next(nd)) {
2284 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2285 pair = hists__find_hierarchy_entry(other_root, pos);
2286
2287 if (pair) {
2288 hist_entry__add_pair(pair, pos);
2289 hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2290 }
2291 }
2292 }
2293
2294 /*
2295 * Look for pairs to link to the leader buckets (hist_entries):
2296 */
2297 void hists__match(struct hists *leader, struct hists *other)
2298 {
2299 struct rb_root *root;
2300 struct rb_node *nd;
2301 struct hist_entry *pos, *pair;
2302
2303 if (symbol_conf.report_hierarchy) {
2304 /* hierarchy report always collapses entries */
2305 return hists__match_hierarchy(&leader->entries_collapsed,
2306 &other->entries_collapsed);
2307 }
2308
2309 if (hists__has(leader, need_collapse))
2310 root = &leader->entries_collapsed;
2311 else
2312 root = leader->entries_in;
2313
2314 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
2315 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2316 pair = hists__find_entry(other, pos);
2317
2318 if (pair)
2319 hist_entry__add_pair(pair, pos);
2320 }
2321 }
2322
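/*
 * Recursively walk 'other_root' and make sure every entry there has a
 * counterpart under 'leader_root', adding dummy entries as needed so
 * both hierarchies end up with the same shape.
 */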
2323 static int hists__link_hierarchy(struct hists *leader_hists,
2324 struct hist_entry *parent,
2325 struct rb_root *leader_root,
2326 struct rb_root *other_root)
2327 {
2328 struct rb_node *nd;
2329 struct hist_entry *pos, *leader;
2330
2331 for (nd = rb_first(other_root); nd; nd = rb_next(nd)) {
2332 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2333
2334 if (hist_entry__has_pairs(pos)) {
2335 bool found = false;
2336
2337 list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
2338 if (leader->hists == leader_hists) {
2339 found = true;
2340 break;
2341 }
2342 }
2343 if (!found)
2344 return -1;
2345 } else {
2346 leader = add_dummy_hierarchy_entry(leader_hists,
2347 leader_root, pos);
2348 if (leader == NULL)
2349 return -1;
2350
2351 /* set parent_he only on the dummy leader, not on pos */
2352 leader->parent_he = parent;
2353
2354 hist_entry__add_pair(pos, leader);
2355 }
2356
2357 if (!pos->leaf) {
2358 if (hists__link_hierarchy(leader_hists, leader,
2359 &leader->hroot_in,
2360 &pos->hroot_in) < 0)
2361 return -1;
2362 }
2363 }
2364 return 0;
2365 }
2366
2367 /*
2368 * Look for entries in the other hists that are not present in the leader;
2369 * if we find them, add a dummy entry on the leader hists with period=0,
2370 * nr_events=0, to serve as the list header.
2371 */
2372 int hists__link(struct hists *leader, struct hists *other)
2373 {
2374 struct rb_root *root;
2375 struct rb_node *nd;
2376 struct hist_entry *pos, *pair;
2377
2378 if (symbol_conf.report_hierarchy) {
2379 /* hierarchy report always collapses entries */
2380 return hists__link_hierarchy(leader, NULL,
2381 &leader->entries_collapsed,
2382 &other->entries_collapsed);
2383 }
2384
2385 if (hists__has(other, need_collapse))
2386 root = &other->entries_collapsed;
2387 else
2388 root = other->entries_in;
2389
2390 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
2391 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2392
2393 if (!hist_entry__has_pairs(pos)) {
2394 pair = hists__add_dummy_entry(leader, pos);
2395 if (pair == NULL)
2396 return -1;
2397 hist_entry__add_pair(pos, pair);
2398 }
2399 }
2400
2401 return 0;
2402 }
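
/*
 * Editor's sketch (assumption: this mirrors how perf diff pairs up the
 * hists of two sessions in builtin-diff.c):
 *
 *	hists__match(leader, other);	// link buckets both sides share
 *	hists__link(leader, other);	// add dummies for the leftovers
 */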
2403
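/*
 * On hardware whose branch stack records cycle counts (e.g. recent
 * Intel LBRs), each entry carries the cycles elapsed since the previous
 * branch; crediting them to the 'from' address lets annotate show
 * per-block cycles and IPC.
 */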
2404 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2405 struct perf_sample *sample, bool nonany_branch_mode)
2406 {
2407 struct branch_info *bi;
2408
2409 /* If we have branch cycles, always annotate them. */
2410 if (bs && bs->nr && bs->entries[0].flags.cycles) {
2411 int i;
2412
2413 bi = sample__resolve_bstack(sample, al);
2414 if (bi) {
2415 struct addr_map_symbol *prev = NULL;
2416
2417 /*
2418 * Ignore errors; we still want to process
2419 * the other entries.
2420 *
2421 * For non-standard branch modes, always
2422 * force no IPC (prev == NULL).
2423 *
2424 * Note that perf stores branches reversed from
2425 * program order!
2426 */
2427 for (i = bs->nr - 1; i >= 0; i--) {
2428 addr_map_symbol__account_cycles(&bi[i].from,
2429 nonany_branch_mode ? NULL : prev,
2430 bi[i].flags.cycles);
2431 prev = &bi[i].to;
2432 }
2433 free(bi);
2434 }
2435 }
2436 }
2437
2438 size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
2439 {
2440 struct perf_evsel *pos;
2441 size_t ret = 0;
2442
2443 evlist__for_each_entry(evlist, pos) {
2444 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
2445 ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
2446 }
2447
2448 return ret;
2449 }
2450
2452 u64 hists__total_period(struct hists *hists)
2453 {
2454 return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2455 hists->stats.total_period;
2456 }
2457
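/*
 * Editor's sketch: this is the parser behind the --percentage
 * {relative|absolute} option of perf report/top, e.g.
 *
 *	perf report --percentage absolute
 */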
2458 int parse_filter_percentage(const struct option *opt __maybe_unused,
2459 const char *arg, int unset __maybe_unused)
2460 {
2461 if (!strcmp(arg, "relative"))
2462 symbol_conf.filter_relative = true;
2463 else if (!strcmp(arg, "absolute"))
2464 symbol_conf.filter_relative = false;
2465 else {
2466 pr_debug("Invalid percentage: %s\n", arg);
2467 return -1;
2468 }
2469
2470 return 0;
2471 }
2472
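/*
 * Editor's sketch: called from the perf_config() callbacks of the
 * builtins; the matching perfconfig stanza would be
 *
 *	[hist]
 *		percentage = relative
 */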
2473 int perf_hist_config(const char *var, const char *value)
2474 {
2475 if (!strcmp(var, "hist.percentage"))
2476 return parse_filter_percentage(NULL, value, 0);
2477
2478 return 0;
2479 }
2480
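/*
 * The two input roots let new entries be inserted into one tree while
 * the other is being collapsed (see hists__get_rotate_entries_in()
 * earlier in this file).
 */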
2481 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2482 {
2483 memset(hists, 0, sizeof(*hists));
2484 hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
2485 hists->entries_in = &hists->entries_in_array[0];
2486 hists->entries_collapsed = RB_ROOT;
2487 hists->entries = RB_ROOT;
2488 pthread_mutex_init(&hists->lock, NULL);
2489 hists->socket_filter = -1;
2490 hists->hpp_list = hpp_list;
2491 INIT_LIST_HEAD(&hists->hpp_formats);
2492 return 0;
2493 }
2494
2495 static void hists__delete_remaining_entries(struct rb_root *root)
2496 {
2497 struct rb_node *node;
2498 struct hist_entry *he;
2499
2500 while (!RB_EMPTY_ROOT(root)) {
2501 node = rb_first(root);
2502 rb_erase(node, root);
2503
2504 he = rb_entry(node, struct hist_entry, rb_node_in);
2505 hist_entry__delete(he);
2506 }
2507 }
2508
2509 static void hists__delete_all_entries(struct hists *hists)
2510 {
2511 hists__delete_entries(hists);
2512 hists__delete_remaining_entries(&hists->entries_in_array[0]);
2513 hists__delete_remaining_entries(&hists->entries_in_array[1]);
2514 hists__delete_remaining_entries(&hists->entries_collapsed);
2515 }
2516
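/*
 * Tear down everything hanging off an evsel's hists: entries in the
 * output, input and collapsed rbtrees, plus any dynamically added hpp
 * formats.
 */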
2517 static void hists_evsel__exit(struct perf_evsel *evsel)
2518 {
2519 struct hists *hists = evsel__hists(evsel);
2520 struct perf_hpp_fmt *fmt, *pos;
2521 struct perf_hpp_list_node *node, *tmp;
2522
2523 hists__delete_all_entries(hists);
2524
2525 list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2526 perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2527 list_del(&fmt->list);
2528 free(fmt);
2529 }
2530 list_del(&node->list);
2531 free(node);
2532 }
2533 }
2534
2535 static int hists_evsel__init(struct perf_evsel *evsel)
2536 {
2537 struct hists *hists = evsel__hists(evsel);
2538
2539 __hists__init(hists, &perf_hpp_list);
2540 return 0;
2541 }
2542
2543 /*
2544 * hists_evsel__exit() above frees the hist_entries stored in the
2545 * rbtrees, so no extra teardown is needed here.
2546 */
2547
2548 int hists__init(void)
2549 {
2550 int err = perf_evsel__object_config(sizeof(struct hists_evsel),
2551 hists_evsel__init,
2552 hists_evsel__exit);
2553 if (err)
2554 fputs("FATAL ERROR: Couldn't set up hists class\n", stderr);
2555
2556 return err;
2557 }
2558
2559 void perf_hpp_list__init(struct perf_hpp_list *list)
2560 {
2561 INIT_LIST_HEAD(&list->fields);
2562 INIT_LIST_HEAD(&list->sorts);
2563 }