]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - tools/perf/util/hist.c
perf probe: Fix possible double free on error
[mirror_ubuntu-artful-kernel.git] / tools / perf / util / hist.c
CommitLineData
8a0ecfb8 1#include "util.h"
598357eb 2#include "build-id.h"
3d1d07ec 3#include "hist.h"
4e4f06e4
ACM
4#include "session.h"
5#include "sort.h"
2a1731fb 6#include "evlist.h"
29d720ed 7#include "evsel.h"
69bcb019 8#include "annotate.h"
740b97f9 9#include "ui/progress.h"
9b33827d 10#include <math.h>
3d1d07ec 11
90cf1fb5
ACM
12static bool hists__filter_entry_by_dso(struct hists *hists,
13 struct hist_entry *he);
14static bool hists__filter_entry_by_thread(struct hists *hists,
15 struct hist_entry *he);
e94d53eb
NK
16static bool hists__filter_entry_by_symbol(struct hists *hists,
17 struct hist_entry *he);
90cf1fb5 18
42b28ac0 19u16 hists__col_len(struct hists *hists, enum hist_column col)
8a6c5b26 20{
42b28ac0 21 return hists->col_len[col];
8a6c5b26
ACM
22}
23
42b28ac0 24void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
8a6c5b26 25{
42b28ac0 26 hists->col_len[col] = len;
8a6c5b26
ACM
27}
28
42b28ac0 29bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
8a6c5b26 30{
42b28ac0
ACM
31 if (len > hists__col_len(hists, col)) {
32 hists__set_col_len(hists, col, len);
8a6c5b26
ACM
33 return true;
34 }
35 return false;
36}
37
7ccf4f90 38void hists__reset_col_len(struct hists *hists)
8a6c5b26
ACM
39{
40 enum hist_column col;
41
42 for (col = 0; col < HISTC_NR_COLS; ++col)
42b28ac0 43 hists__set_col_len(hists, col, 0);
8a6c5b26
ACM
44}
45
b5387528
RAV
46static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
47{
48 const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
49
50 if (hists__col_len(hists, dso) < unresolved_col_width &&
51 !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
52 !symbol_conf.dso_list)
53 hists__set_col_len(hists, dso, unresolved_col_width);
54}
55
7ccf4f90 56void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
8a6c5b26 57{
b5387528 58 const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
98a3b32c 59 int symlen;
8a6c5b26
ACM
60 u16 len;
61
ded19d57
NK
62 /*
63 * +4 accounts for '[x] ' priv level info
64 * +2 accounts for 0x prefix on raw addresses
65 * +3 accounts for ' y ' symtab origin info
66 */
67 if (h->ms.sym) {
68 symlen = h->ms.sym->namelen + 4;
69 if (verbose)
70 symlen += BITS_PER_LONG / 4 + 2 + 3;
71 hists__new_col_len(hists, HISTC_SYMBOL, symlen);
72 } else {
98a3b32c
SE
73 symlen = unresolved_col_width + 4 + 2;
74 hists__new_col_len(hists, HISTC_SYMBOL, symlen);
b5387528 75 hists__set_unres_dso_col_len(hists, HISTC_DSO);
98a3b32c 76 }
8a6c5b26
ACM
77
78 len = thread__comm_len(h->thread);
42b28ac0
ACM
79 if (hists__new_col_len(hists, HISTC_COMM, len))
80 hists__set_col_len(hists, HISTC_THREAD, len + 6);
8a6c5b26
ACM
81
82 if (h->ms.map) {
83 len = dso__name_len(h->ms.map->dso);
42b28ac0 84 hists__new_col_len(hists, HISTC_DSO, len);
8a6c5b26 85 }
b5387528 86
cb993744
NK
87 if (h->parent)
88 hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
89
b5387528 90 if (h->branch_info) {
b5387528
RAV
91 if (h->branch_info->from.sym) {
92 symlen = (int)h->branch_info->from.sym->namelen + 4;
ded19d57
NK
93 if (verbose)
94 symlen += BITS_PER_LONG / 4 + 2 + 3;
b5387528
RAV
95 hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
96
97 symlen = dso__name_len(h->branch_info->from.map->dso);
98 hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
99 } else {
100 symlen = unresolved_col_width + 4 + 2;
101 hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
102 hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
103 }
104
105 if (h->branch_info->to.sym) {
106 symlen = (int)h->branch_info->to.sym->namelen + 4;
ded19d57
NK
107 if (verbose)
108 symlen += BITS_PER_LONG / 4 + 2 + 3;
b5387528
RAV
109 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
110
111 symlen = dso__name_len(h->branch_info->to.map->dso);
112 hists__new_col_len(hists, HISTC_DSO_TO, symlen);
113 } else {
114 symlen = unresolved_col_width + 4 + 2;
115 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
116 hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
117 }
118 }
98a3b32c
SE
119
120 if (h->mem_info) {
98a3b32c
SE
121 if (h->mem_info->daddr.sym) {
122 symlen = (int)h->mem_info->daddr.sym->namelen + 4
123 + unresolved_col_width + 2;
124 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
125 symlen);
9b32ba71
DZ
126 hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
127 symlen + 1);
98a3b32c
SE
128 } else {
129 symlen = unresolved_col_width + 4 + 2;
130 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
131 symlen);
132 }
133 if (h->mem_info->daddr.map) {
134 symlen = dso__name_len(h->mem_info->daddr.map->dso);
135 hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
136 symlen);
137 } else {
138 symlen = unresolved_col_width + 4 + 2;
139 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
140 }
141 } else {
142 symlen = unresolved_col_width + 4 + 2;
143 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
144 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
145 }
146
147 hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
148 hists__new_col_len(hists, HISTC_MEM_TLB, 22);
149 hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
150 hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
151 hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
152 hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
475eeab9
AK
153
154 if (h->transaction)
155 hists__new_col_len(hists, HISTC_TRANSACTION,
156 hist_entry__transaction_len());
8a6c5b26
ACM
157}
158
7ccf4f90
NK
159void hists__output_recalc_col_len(struct hists *hists, int max_rows)
160{
161 struct rb_node *next = rb_first(&hists->entries);
162 struct hist_entry *n;
163 int row = 0;
164
165 hists__reset_col_len(hists);
166
167 while (next && row++ < max_rows) {
168 n = rb_entry(next, struct hist_entry, rb_node);
169 if (!n->filtered)
170 hists__calc_col_len(hists, n);
171 next = rb_next(&n->rb_node);
172 }
173}
174
f39056f9
NK
175static void he_stat__add_cpumode_period(struct he_stat *he_stat,
176 unsigned int cpumode, u64 period)
a1645ce1 177{
28e2a106 178 switch (cpumode) {
a1645ce1 179 case PERF_RECORD_MISC_KERNEL:
f39056f9 180 he_stat->period_sys += period;
a1645ce1
ZY
181 break;
182 case PERF_RECORD_MISC_USER:
f39056f9 183 he_stat->period_us += period;
a1645ce1
ZY
184 break;
185 case PERF_RECORD_MISC_GUEST_KERNEL:
f39056f9 186 he_stat->period_guest_sys += period;
a1645ce1
ZY
187 break;
188 case PERF_RECORD_MISC_GUEST_USER:
f39056f9 189 he_stat->period_guest_us += period;
a1645ce1
ZY
190 break;
191 default:
192 break;
193 }
194}
195
05484298
AK
196static void he_stat__add_period(struct he_stat *he_stat, u64 period,
197 u64 weight)
139c0815 198{
98a3b32c 199
139c0815 200 he_stat->period += period;
05484298 201 he_stat->weight += weight;
139c0815
NK
202 he_stat->nr_events += 1;
203}
204
205static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
206{
207 dest->period += src->period;
208 dest->period_sys += src->period_sys;
209 dest->period_us += src->period_us;
210 dest->period_guest_sys += src->period_guest_sys;
211 dest->period_guest_us += src->period_guest_us;
212 dest->nr_events += src->nr_events;
05484298 213 dest->weight += src->weight;
139c0815
NK
214}
215
f39056f9 216static void he_stat__decay(struct he_stat *he_stat)
ab81f3fd 217{
f39056f9
NK
218 he_stat->period = (he_stat->period * 7) / 8;
219 he_stat->nr_events = (he_stat->nr_events * 7) / 8;
05484298 220 /* XXX need decay for weight too? */
ab81f3fd
ACM
221}
222
223static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
224{
b24c28f7 225 u64 prev_period = he->stat.period;
3186b681 226 u64 diff;
c64550cf
ACM
227
228 if (prev_period == 0)
df71d95f 229 return true;
c64550cf 230
f39056f9 231 he_stat__decay(&he->stat);
f8be1c8c
NK
232 if (symbol_conf.cumulate_callchain)
233 he_stat__decay(he->stat_acc);
c64550cf 234
3186b681
NK
235 diff = prev_period - he->stat.period;
236
237 hists->stats.total_period -= diff;
c64550cf 238 if (!he->filtered)
3186b681 239 hists->stats.total_non_filtered_period -= diff;
c64550cf 240
b24c28f7 241 return he->stat.period == 0;
ab81f3fd
ACM
242}
243
956b65e1
ACM
244static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
245{
246 rb_erase(&he->rb_node, &hists->entries);
247
248 if (sort__need_collapse)
249 rb_erase(&he->rb_node_in, &hists->entries_collapsed);
250
251 --hists->nr_entries;
252 if (!he->filtered)
253 --hists->nr_non_filtered_entries;
254
255 hist_entry__delete(he);
256}
257
3a5714f8 258void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
ab81f3fd
ACM
259{
260 struct rb_node *next = rb_first(&hists->entries);
261 struct hist_entry *n;
262
263 while (next) {
264 n = rb_entry(next, struct hist_entry, rb_node);
265 next = rb_next(&n->rb_node);
df71d95f
ACM
266 /*
267 * We may be annotating this, for instance, so keep it here in
268 * case some it gets new samples, we'll eventually free it when
269 * the user stops browsing and it agains gets fully decayed.
270 */
b079d4e9
ACM
271 if (((zap_user && n->level == '.') ||
272 (zap_kernel && n->level != '.') ||
273 hists__decay_entry(hists, n)) &&
274 !n->used) {
956b65e1 275 hists__delete_entry(hists, n);
ab81f3fd
ACM
276 }
277 }
278}
279
701937bd
NK
280void hists__delete_entries(struct hists *hists)
281{
282 struct rb_node *next = rb_first(&hists->entries);
283 struct hist_entry *n;
284
285 while (next) {
286 n = rb_entry(next, struct hist_entry, rb_node);
287 next = rb_next(&n->rb_node);
288
956b65e1 289 hists__delete_entry(hists, n);
701937bd
NK
290 }
291}
292
/*
 * histogram, sorted on item, collects periods
 */

a0b51af3
NK
297static struct hist_entry *hist_entry__new(struct hist_entry *template,
298 bool sample_self)
28e2a106 299{
f8be1c8c
NK
300 size_t callchain_size = 0;
301 struct hist_entry *he;
302
82aa019e 303 if (symbol_conf.use_callchain)
f8be1c8c
NK
304 callchain_size = sizeof(struct callchain_root);
305
306 he = zalloc(sizeof(*he) + callchain_size);
28e2a106 307
12c14278
ACM
308 if (he != NULL) {
309 *he = *template;
c4b35351 310
f8be1c8c
NK
311 if (symbol_conf.cumulate_callchain) {
312 he->stat_acc = malloc(sizeof(he->stat));
313 if (he->stat_acc == NULL) {
314 free(he);
315 return NULL;
316 }
317 memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
a0b51af3
NK
318 if (!sample_self)
319 memset(&he->stat, 0, sizeof(he->stat));
f8be1c8c
NK
320 }
321
12c14278
ACM
322 if (he->ms.map)
323 he->ms.map->referenced = true;
3cf0cb1f
SE
324
325 if (he->branch_info) {
26353a61
NK
326 /*
327 * This branch info is (a part of) allocated from
644f2df2 328 * sample__resolve_bstack() and will be freed after
26353a61
NK
329 * adding new entries. So we need to save a copy.
330 */
331 he->branch_info = malloc(sizeof(*he->branch_info));
332 if (he->branch_info == NULL) {
f8be1c8c 333 free(he->stat_acc);
26353a61
NK
334 free(he);
335 return NULL;
336 }
337
338 memcpy(he->branch_info, template->branch_info,
339 sizeof(*he->branch_info));
340
3cf0cb1f
SE
341 if (he->branch_info->from.map)
342 he->branch_info->from.map->referenced = true;
343 if (he->branch_info->to.map)
344 he->branch_info->to.map->referenced = true;
345 }
346
98a3b32c
SE
347 if (he->mem_info) {
348 if (he->mem_info->iaddr.map)
349 he->mem_info->iaddr.map->referenced = true;
350 if (he->mem_info->daddr.map)
351 he->mem_info->daddr.map->referenced = true;
352 }
353
28e2a106 354 if (symbol_conf.use_callchain)
12c14278 355 callchain_init(he->callchain);
b821c732
ACM
356
357 INIT_LIST_HEAD(&he->pairs.node);
f3b623b8 358 thread__get(he->thread);
28e2a106
ACM
359 }
360
12c14278 361 return he;
28e2a106
ACM
362}
363
7a007ca9
ACM
364static u8 symbol__parent_filter(const struct symbol *parent)
365{
366 if (symbol_conf.exclude_other && parent == NULL)
367 return 1 << HIST_FILTER__PARENT;
368 return 0;
369}
370
b5387528 371static struct hist_entry *add_hist_entry(struct hists *hists,
f1cbf78d 372 struct hist_entry *entry,
a0b51af3
NK
373 struct addr_location *al,
374 bool sample_self)
9735abf1 375{
1980c2eb 376 struct rb_node **p;
9735abf1
ACM
377 struct rb_node *parent = NULL;
378 struct hist_entry *he;
354cc40e 379 int64_t cmp;
f1cbf78d
NK
380 u64 period = entry->stat.period;
381 u64 weight = entry->stat.weight;
9735abf1 382
1980c2eb
ACM
383 p = &hists->entries_in->rb_node;
384
9735abf1
ACM
385 while (*p != NULL) {
386 parent = *p;
1980c2eb 387 he = rb_entry(parent, struct hist_entry, rb_node_in);
9735abf1 388
9afcf930
NK
389 /*
390 * Make sure that it receives arguments in a same order as
391 * hist_entry__collapse() so that we can use an appropriate
392 * function when searching an entry regardless which sort
393 * keys were used.
394 */
395 cmp = hist_entry__cmp(he, entry);
9735abf1
ACM
396
397 if (!cmp) {
a0b51af3
NK
398 if (sample_self)
399 he_stat__add_period(&he->stat, period, weight);
f8be1c8c
NK
400 if (symbol_conf.cumulate_callchain)
401 he_stat__add_period(he->stat_acc, period, weight);
63fa471d 402
ceb2acbc 403 /*
e80faac0 404 * This mem info was allocated from sample__resolve_mem
ceb2acbc
NK
405 * and will not be used anymore.
406 */
74cf249d 407 zfree(&entry->mem_info);
ceb2acbc 408
63fa471d
DM
409 /* If the map of an existing hist_entry has
410 * become out-of-date due to an exec() or
411 * similar, update it. Otherwise we will
412 * mis-adjust symbol addresses when computing
413 * the history counter to increment.
414 */
415 if (he->ms.map != entry->ms.map) {
416 he->ms.map = entry->ms.map;
417 if (he->ms.map)
418 he->ms.map->referenced = true;
419 }
28e2a106 420 goto out;
9735abf1
ACM
421 }
422
423 if (cmp < 0)
424 p = &(*p)->rb_left;
425 else
426 p = &(*p)->rb_right;
427 }
428
a0b51af3 429 he = hist_entry__new(entry, sample_self);
9735abf1 430 if (!he)
27a0dcb7 431 return NULL;
1980c2eb 432
590cd344
NK
433 hists->nr_entries++;
434
1980c2eb
ACM
435 rb_link_node(&he->rb_node_in, parent, p);
436 rb_insert_color(&he->rb_node_in, hists->entries_in);
28e2a106 437out:
a0b51af3
NK
438 if (sample_self)
439 he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
f8be1c8c
NK
440 if (symbol_conf.cumulate_callchain)
441 he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
9735abf1
ACM
442 return he;
443}
444
c824c433 445struct hist_entry *__hists__add_entry(struct hists *hists,
b5387528 446 struct addr_location *al,
41a4e6e2
NK
447 struct symbol *sym_parent,
448 struct branch_info *bi,
449 struct mem_info *mi,
a0b51af3
NK
450 u64 period, u64 weight, u64 transaction,
451 bool sample_self)
b5387528
RAV
452{
453 struct hist_entry entry = {
454 .thread = al->thread,
4dfced35 455 .comm = thread__comm(al->thread),
b5387528
RAV
456 .ms = {
457 .map = al->map,
458 .sym = al->sym,
459 },
7365be55
DZ
460 .cpu = al->cpu,
461 .cpumode = al->cpumode,
462 .ip = al->addr,
463 .level = al->level,
b24c28f7 464 .stat = {
c4b35351 465 .nr_events = 1,
41a4e6e2 466 .period = period,
05484298 467 .weight = weight,
b24c28f7 468 },
b5387528 469 .parent = sym_parent,
2c86c7ca 470 .filtered = symbol__parent_filter(sym_parent) | al->filtered,
c824c433 471 .hists = hists,
41a4e6e2
NK
472 .branch_info = bi,
473 .mem_info = mi,
475eeab9 474 .transaction = transaction,
b5387528
RAV
475 };
476
a0b51af3 477 return add_hist_entry(hists, &entry, al, sample_self);
b5387528
RAV
478}
479
69bcb019
NK
480static int
481iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
482 struct addr_location *al __maybe_unused)
483{
484 return 0;
485}
486
487static int
488iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
489 struct addr_location *al __maybe_unused)
490{
491 return 0;
492}
493
494static int
495iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
496{
497 struct perf_sample *sample = iter->sample;
498 struct mem_info *mi;
499
500 mi = sample__resolve_mem(sample, al);
501 if (mi == NULL)
502 return -ENOMEM;
503
504 iter->priv = mi;
505 return 0;
506}
507
508static int
509iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
510{
511 u64 cost;
512 struct mem_info *mi = iter->priv;
4ea062ed 513 struct hists *hists = evsel__hists(iter->evsel);
69bcb019
NK
514 struct hist_entry *he;
515
516 if (mi == NULL)
517 return -EINVAL;
518
519 cost = iter->sample->weight;
520 if (!cost)
521 cost = 1;
522
523 /*
524 * must pass period=weight in order to get the correct
525 * sorting from hists__collapse_resort() which is solely
526 * based on periods. We want sorting be done on nr_events * weight
527 * and this is indirectly achieved by passing period=weight here
528 * and the he_stat__add_period() function.
529 */
4ea062ed 530 he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
a0b51af3 531 cost, cost, 0, true);
69bcb019
NK
532 if (!he)
533 return -ENOMEM;
534
535 iter->he = he;
536 return 0;
537}
538
539static int
9d3c02d7
NK
540iter_finish_mem_entry(struct hist_entry_iter *iter,
541 struct addr_location *al __maybe_unused)
69bcb019
NK
542{
543 struct perf_evsel *evsel = iter->evsel;
4ea062ed 544 struct hists *hists = evsel__hists(evsel);
69bcb019 545 struct hist_entry *he = iter->he;
69bcb019
NK
546 int err = -EINVAL;
547
548 if (he == NULL)
549 goto out;
550
4ea062ed 551 hists__inc_nr_samples(hists, he->filtered);
69bcb019
NK
552
553 err = hist_entry__append_callchain(he, iter->sample);
554
555out:
556 /*
557 * We don't need to free iter->priv (mem_info) here since
558 * the mem info was either already freed in add_hist_entry() or
559 * passed to a new hist entry by hist_entry__new().
560 */
561 iter->priv = NULL;
562
563 iter->he = NULL;
564 return err;
565}
566
567static int
568iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
569{
570 struct branch_info *bi;
571 struct perf_sample *sample = iter->sample;
572
573 bi = sample__resolve_bstack(sample, al);
574 if (!bi)
575 return -ENOMEM;
576
577 iter->curr = 0;
578 iter->total = sample->branch_stack->nr;
579
580 iter->priv = bi;
581 return 0;
582}
583
584static int
585iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
586 struct addr_location *al __maybe_unused)
587{
9d3c02d7
NK
588 /* to avoid calling callback function */
589 iter->he = NULL;
590
69bcb019
NK
591 return 0;
592}
593
594static int
595iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
596{
597 struct branch_info *bi = iter->priv;
598 int i = iter->curr;
599
600 if (bi == NULL)
601 return 0;
602
603 if (iter->curr >= iter->total)
604 return 0;
605
606 al->map = bi[i].to.map;
607 al->sym = bi[i].to.sym;
608 al->addr = bi[i].to.addr;
609 return 1;
610}
611
612static int
613iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
614{
9d3c02d7 615 struct branch_info *bi;
69bcb019 616 struct perf_evsel *evsel = iter->evsel;
4ea062ed 617 struct hists *hists = evsel__hists(evsel);
69bcb019
NK
618 struct hist_entry *he = NULL;
619 int i = iter->curr;
620 int err = 0;
621
622 bi = iter->priv;
623
624 if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
625 goto out;
626
627 /*
628 * The report shows the percentage of total branches captured
629 * and not events sampled. Thus we use a pseudo period of 1.
630 */
4ea062ed 631 he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
a0b51af3 632 1, 1, 0, true);
69bcb019
NK
633 if (he == NULL)
634 return -ENOMEM;
635
4ea062ed 636 hists__inc_nr_samples(hists, he->filtered);
69bcb019
NK
637
638out:
639 iter->he = he;
640 iter->curr++;
641 return err;
642}
643
644static int
645iter_finish_branch_entry(struct hist_entry_iter *iter,
646 struct addr_location *al __maybe_unused)
647{
648 zfree(&iter->priv);
649 iter->he = NULL;
650
651 return iter->curr >= iter->total ? 0 : -1;
652}
653
654static int
655iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
656 struct addr_location *al __maybe_unused)
657{
658 return 0;
659}
660
661static int
662iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
663{
664 struct perf_evsel *evsel = iter->evsel;
665 struct perf_sample *sample = iter->sample;
666 struct hist_entry *he;
667
4ea062ed 668 he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
69bcb019 669 sample->period, sample->weight,
a0b51af3 670 sample->transaction, true);
69bcb019
NK
671 if (he == NULL)
672 return -ENOMEM;
673
674 iter->he = he;
675 return 0;
676}
677
678static int
9d3c02d7
NK
679iter_finish_normal_entry(struct hist_entry_iter *iter,
680 struct addr_location *al __maybe_unused)
69bcb019 681{
69bcb019
NK
682 struct hist_entry *he = iter->he;
683 struct perf_evsel *evsel = iter->evsel;
684 struct perf_sample *sample = iter->sample;
685
686 if (he == NULL)
687 return 0;
688
689 iter->he = NULL;
690
4ea062ed 691 hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
69bcb019
NK
692
693 return hist_entry__append_callchain(he, sample);
694}
695
7a13aa28
NK
696static int
697iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused,
698 struct addr_location *al __maybe_unused)
699{
b4d3c8bd
NK
700 struct hist_entry **he_cache;
701
7a13aa28 702 callchain_cursor_commit(&callchain_cursor);
b4d3c8bd
NK
703
704 /*
705 * This is for detecting cycles or recursions so that they're
706 * cumulated only one time to prevent entries more than 100%
707 * overhead.
708 */
709 he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
710 if (he_cache == NULL)
711 return -ENOMEM;
712
713 iter->priv = he_cache;
714 iter->curr = 0;
715
7a13aa28
NK
716 return 0;
717}
718
719static int
720iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
721 struct addr_location *al)
722{
723 struct perf_evsel *evsel = iter->evsel;
4ea062ed 724 struct hists *hists = evsel__hists(evsel);
7a13aa28 725 struct perf_sample *sample = iter->sample;
b4d3c8bd 726 struct hist_entry **he_cache = iter->priv;
7a13aa28
NK
727 struct hist_entry *he;
728 int err = 0;
729
4ea062ed 730 he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
7a13aa28
NK
731 sample->period, sample->weight,
732 sample->transaction, true);
733 if (he == NULL)
734 return -ENOMEM;
735
736 iter->he = he;
b4d3c8bd 737 he_cache[iter->curr++] = he;
7a13aa28 738
82aa019e 739 hist_entry__append_callchain(he, sample);
be7f855a
NK
740
741 /*
742 * We need to re-initialize the cursor since callchain_append()
743 * advanced the cursor to the end.
744 */
745 callchain_cursor_commit(&callchain_cursor);
746
4ea062ed 747 hists__inc_nr_samples(hists, he->filtered);
7a13aa28
NK
748
749 return err;
750}
751
752static int
753iter_next_cumulative_entry(struct hist_entry_iter *iter,
754 struct addr_location *al)
755{
756 struct callchain_cursor_node *node;
757
758 node = callchain_cursor_current(&callchain_cursor);
759 if (node == NULL)
760 return 0;
761
c7405d85 762 return fill_callchain_info(al, node, iter->hide_unresolved);
7a13aa28
NK
763}
764
765static int
766iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
767 struct addr_location *al)
768{
769 struct perf_evsel *evsel = iter->evsel;
770 struct perf_sample *sample = iter->sample;
b4d3c8bd 771 struct hist_entry **he_cache = iter->priv;
7a13aa28 772 struct hist_entry *he;
b4d3c8bd
NK
773 struct hist_entry he_tmp = {
774 .cpu = al->cpu,
775 .thread = al->thread,
776 .comm = thread__comm(al->thread),
777 .ip = al->addr,
778 .ms = {
779 .map = al->map,
780 .sym = al->sym,
781 },
782 .parent = iter->parent,
783 };
784 int i;
be7f855a
NK
785 struct callchain_cursor cursor;
786
787 callchain_cursor_snapshot(&cursor, &callchain_cursor);
788
789 callchain_cursor_advance(&callchain_cursor);
b4d3c8bd
NK
790
791 /*
792 * Check if there's duplicate entries in the callchain.
793 * It's possible that it has cycles or recursive calls.
794 */
795 for (i = 0; i < iter->curr; i++) {
9d3c02d7
NK
796 if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
797 /* to avoid calling callback function */
798 iter->he = NULL;
b4d3c8bd 799 return 0;
9d3c02d7 800 }
b4d3c8bd 801 }
7a13aa28 802
4ea062ed 803 he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
7a13aa28
NK
804 sample->period, sample->weight,
805 sample->transaction, false);
806 if (he == NULL)
807 return -ENOMEM;
808
809 iter->he = he;
b4d3c8bd 810 he_cache[iter->curr++] = he;
7a13aa28 811
82aa019e
NK
812 if (symbol_conf.use_callchain)
813 callchain_append(he->callchain, &cursor, sample->period);
7a13aa28
NK
814 return 0;
815}
816
817static int
818iter_finish_cumulative_entry(struct hist_entry_iter *iter,
819 struct addr_location *al __maybe_unused)
820{
b4d3c8bd 821 zfree(&iter->priv);
7a13aa28 822 iter->he = NULL;
b4d3c8bd 823
7a13aa28
NK
824 return 0;
825}
826
69bcb019
NK
827const struct hist_iter_ops hist_iter_mem = {
828 .prepare_entry = iter_prepare_mem_entry,
829 .add_single_entry = iter_add_single_mem_entry,
830 .next_entry = iter_next_nop_entry,
831 .add_next_entry = iter_add_next_nop_entry,
832 .finish_entry = iter_finish_mem_entry,
833};
834
835const struct hist_iter_ops hist_iter_branch = {
836 .prepare_entry = iter_prepare_branch_entry,
837 .add_single_entry = iter_add_single_branch_entry,
838 .next_entry = iter_next_branch_entry,
839 .add_next_entry = iter_add_next_branch_entry,
840 .finish_entry = iter_finish_branch_entry,
841};
842
843const struct hist_iter_ops hist_iter_normal = {
844 .prepare_entry = iter_prepare_normal_entry,
845 .add_single_entry = iter_add_single_normal_entry,
846 .next_entry = iter_next_nop_entry,
847 .add_next_entry = iter_add_next_nop_entry,
848 .finish_entry = iter_finish_normal_entry,
849};
850
7a13aa28
NK
851const struct hist_iter_ops hist_iter_cumulative = {
852 .prepare_entry = iter_prepare_cumulative_entry,
853 .add_single_entry = iter_add_single_cumulative_entry,
854 .next_entry = iter_next_cumulative_entry,
855 .add_next_entry = iter_add_next_cumulative_entry,
856 .finish_entry = iter_finish_cumulative_entry,
857};
858
69bcb019
NK
859int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
860 struct perf_evsel *evsel, struct perf_sample *sample,
9d3c02d7 861 int max_stack_depth, void *arg)
69bcb019
NK
862{
863 int err, err2;
864
865 err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
866 max_stack_depth);
867 if (err)
868 return err;
869
870 iter->evsel = evsel;
871 iter->sample = sample;
872
873 err = iter->ops->prepare_entry(iter, al);
874 if (err)
875 goto out;
876
877 err = iter->ops->add_single_entry(iter, al);
878 if (err)
879 goto out;
880
9d3c02d7
NK
881 if (iter->he && iter->add_entry_cb) {
882 err = iter->add_entry_cb(iter, al, true, arg);
883 if (err)
884 goto out;
885 }
886
69bcb019
NK
887 while (iter->ops->next_entry(iter, al)) {
888 err = iter->ops->add_next_entry(iter, al);
889 if (err)
890 break;
9d3c02d7
NK
891
892 if (iter->he && iter->add_entry_cb) {
893 err = iter->add_entry_cb(iter, al, false, arg);
894 if (err)
895 goto out;
896 }
69bcb019
NK
897 }
898
899out:
900 err2 = iter->ops->finish_entry(iter, al);
901 if (!err)
902 err = err2;
903
904 return err;
905}
906
3d1d07ec
JK
907int64_t
908hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
909{
093f0ef3 910 struct perf_hpp_fmt *fmt;
3d1d07ec
JK
911 int64_t cmp = 0;
912
093f0ef3 913 perf_hpp__for_each_sort_list(fmt) {
e67d49a7
NK
914 if (perf_hpp__should_skip(fmt))
915 continue;
916
87bbdf76 917 cmp = fmt->cmp(fmt, left, right);
3d1d07ec
JK
918 if (cmp)
919 break;
920 }
921
922 return cmp;
923}
924
925int64_t
926hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
927{
093f0ef3 928 struct perf_hpp_fmt *fmt;
3d1d07ec
JK
929 int64_t cmp = 0;
930
093f0ef3 931 perf_hpp__for_each_sort_list(fmt) {
e67d49a7
NK
932 if (perf_hpp__should_skip(fmt))
933 continue;
934
87bbdf76 935 cmp = fmt->collapse(fmt, left, right);
3d1d07ec
JK
936 if (cmp)
937 break;
938 }
939
940 return cmp;
941}
942
6733d1bf 943void hist_entry__delete(struct hist_entry *he)
3d1d07ec 944{
f3b623b8 945 thread__zput(he->thread);
74cf249d
ACM
946 zfree(&he->branch_info);
947 zfree(&he->mem_info);
f8be1c8c 948 zfree(&he->stat_acc);
f048d548 949 free_srcline(he->srcline);
d114960c 950 free_callchain(he->callchain);
3d1d07ec
JK
951 free(he);
952}
953
/*
 * collapse the histogram
 */

1d037ca1 958static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
1b3a0e95
FW
959 struct rb_root *root,
960 struct hist_entry *he)
3d1d07ec 961{
b9bf0892 962 struct rb_node **p = &root->rb_node;
3d1d07ec
JK
963 struct rb_node *parent = NULL;
964 struct hist_entry *iter;
965 int64_t cmp;
966
967 while (*p != NULL) {
968 parent = *p;
1980c2eb 969 iter = rb_entry(parent, struct hist_entry, rb_node_in);
3d1d07ec
JK
970
971 cmp = hist_entry__collapse(iter, he);
972
973 if (!cmp) {
139c0815 974 he_stat__add_stat(&iter->stat, &he->stat);
f8be1c8c
NK
975 if (symbol_conf.cumulate_callchain)
976 he_stat__add_stat(iter->stat_acc, he->stat_acc);
9ec60972 977
1b3a0e95 978 if (symbol_conf.use_callchain) {
47260645
NK
979 callchain_cursor_reset(&callchain_cursor);
980 callchain_merge(&callchain_cursor,
981 iter->callchain,
1b3a0e95
FW
982 he->callchain);
983 }
6733d1bf 984 hist_entry__delete(he);
fefb0b94 985 return false;
3d1d07ec
JK
986 }
987
988 if (cmp < 0)
989 p = &(*p)->rb_left;
990 else
991 p = &(*p)->rb_right;
992 }
740b97f9 993 hists->nr_entries++;
3d1d07ec 994
1980c2eb
ACM
995 rb_link_node(&he->rb_node_in, parent, p);
996 rb_insert_color(&he->rb_node_in, root);
fefb0b94 997 return true;
3d1d07ec
JK
998}
999
1980c2eb 1000static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
3d1d07ec 1001{
1980c2eb
ACM
1002 struct rb_root *root;
1003
1004 pthread_mutex_lock(&hists->lock);
1005
1006 root = hists->entries_in;
1007 if (++hists->entries_in > &hists->entries_in_array[1])
1008 hists->entries_in = &hists->entries_in_array[0];
1009
1010 pthread_mutex_unlock(&hists->lock);
1011
1012 return root;
1013}
1014
/* Re-evaluate every active filter (dso, thread, symbol) against @he. */
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
1021
c1fb5651 1022void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1980c2eb
ACM
1023{
1024 struct rb_root *root;
3d1d07ec
JK
1025 struct rb_node *next;
1026 struct hist_entry *n;
1027
3a5714f8 1028 if (!sort__need_collapse)
3d1d07ec
JK
1029 return;
1030
740b97f9
NK
1031 hists->nr_entries = 0;
1032
1980c2eb 1033 root = hists__get_rotate_entries_in(hists);
740b97f9 1034
1980c2eb 1035 next = rb_first(root);
b9bf0892 1036
3d1d07ec 1037 while (next) {
33e940a2
ACM
1038 if (session_done())
1039 break;
1980c2eb
ACM
1040 n = rb_entry(next, struct hist_entry, rb_node_in);
1041 next = rb_next(&n->rb_node_in);
3d1d07ec 1042
1980c2eb 1043 rb_erase(&n->rb_node_in, root);
90cf1fb5
ACM
1044 if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
1045 /*
1046 * If it wasn't combined with one of the entries already
1047 * collapsed, we need to apply the filters that may have
1048 * been set by, say, the hist_browser.
1049 */
1050 hists__apply_filters(hists, n);
90cf1fb5 1051 }
c1fb5651
NK
1052 if (prog)
1053 ui_progress__update(prog, 1);
3d1d07ec 1054 }
1980c2eb 1055}
b9bf0892 1056
043ca389 1057static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
29d720ed 1058{
043ca389
NK
1059 struct perf_hpp_fmt *fmt;
1060 int64_t cmp = 0;
29d720ed 1061
26d8b338 1062 perf_hpp__for_each_sort_list(fmt) {
e67d49a7
NK
1063 if (perf_hpp__should_skip(fmt))
1064 continue;
1065
87bbdf76 1066 cmp = fmt->sort(fmt, a, b);
043ca389 1067 if (cmp)
29d720ed
NK
1068 break;
1069 }
1070
043ca389 1071 return cmp;
29d720ed
NK
1072}
1073
9283ba9b
NK
1074static void hists__reset_filter_stats(struct hists *hists)
1075{
1076 hists->nr_non_filtered_entries = 0;
1077 hists->stats.total_non_filtered_period = 0;
1078}
1079
/* Zero both the overall and the filter-aware entry/period statistics. */
void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}
1087
/* Account @h in the unfiltered-entries counters. */
static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}
1093
/*
 * Account @h in the overall statistics, and additionally in the
 * unfiltered statistics when no filter bit is set on it.
 */
void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
1102
/*
 * Insert @he into the output rbtree @entries, ordered by the configured
 * sort keys (hist_entry__sort).  When callchains are in use, the entry's
 * callchain is sorted first, pruning branches whose hits fall below
 * @min_callchain_hits.
 */
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		/* Entries that sort higher go left, i.e. are shown first. */
		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
1128
/*
 * Rebuild hists->entries (the output tree) from the collapsed tree (or
 * the input tree when no collapsing is needed), re-sorting by the output
 * sort keys and recomputing statistics and column widths.
 * @prog, if non-NULL, is ticked once per entry for UI feedback.
 */
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	/* Callchain branches below this share of the total are pruned. */
	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_stats(hists, n);

		/* Only visible entries contribute to column widths. */
		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}
b9bf0892 1163
/*
 * Clear @filter's bit on @h; if that leaves the entry completely
 * unfiltered, fold it, re-account it in the non-filtered statistics and
 * let it contribute to column widths again.
 */
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->ms.unfolded = false;
	h->row_offset = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}
1180
90cf1fb5
ACM
1181
1182static bool hists__filter_entry_by_dso(struct hists *hists,
1183 struct hist_entry *he)
1184{
1185 if (hists->dso_filter != NULL &&
1186 (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
1187 he->filtered |= (1 << HIST_FILTER__DSO);
1188 return true;
1189 }
1190
1191 return false;
1192}
1193
/*
 * Re-apply the dso filter to every entry in the output tree, resetting
 * and rebuilding the non-filtered statistics and column widths.
 */
void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		/* Parentless entries are skipped entirely in exclude-other mode. */
		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}
1215
90cf1fb5
ACM
1216static bool hists__filter_entry_by_thread(struct hists *hists,
1217 struct hist_entry *he)
1218{
1219 if (hists->thread_filter != NULL &&
1220 he->thread != hists->thread_filter) {
1221 he->filtered |= (1 << HIST_FILTER__THREAD);
1222 return true;
1223 }
1224
1225 return false;
1226}
1227
/*
 * Re-apply the thread filter to every entry in the output tree,
 * resetting and rebuilding the non-filtered statistics and column
 * widths.
 */
void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}
ef7b93a1 1246
e94d53eb
NK
1247static bool hists__filter_entry_by_symbol(struct hists *hists,
1248 struct hist_entry *he)
1249{
1250 if (hists->symbol_filter_str != NULL &&
1251 (!he->ms.sym || strstr(he->ms.sym->name,
1252 hists->symbol_filter_str) == NULL)) {
1253 he->filtered |= (1 << HIST_FILTER__SYMBOL);
1254 return true;
1255 }
1256
1257 return false;
1258}
1259
/*
 * Re-apply the symbol filter to every entry in the output tree,
 * resetting and rebuilding the non-filtered statistics and column
 * widths.
 */
void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}
1278
/*
 * Count one event of @type; slot 0 doubles as the running total across
 * all event types.
 */
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}
1284
/* Count one event of @type in this hists' statistics. */
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}
95529be4 1289
/*
 * Count one sample; samples that pass all filters are also counted in
 * the non-filtered total.
 */
void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
1296
/*
 * Find the entry matching @pair in @hists' input (or collapsed) tree; if
 * absent, insert a new zero-stat "dummy" clone of @pair to act as a
 * placeholder for cross-hists comparison.  Returns the found or newly
 * created entry, or NULL on allocation failure.
 */
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		/* Already present: return the existing entry. */
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		/* Placeholder: zero stats, owned by @hists, marked dummy. */
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}
1340
95529be4
ACM
1341static struct hist_entry *hists__find_entry(struct hists *hists,
1342 struct hist_entry *he)
1343{
ce74f60e
NK
1344 struct rb_node *n;
1345
1346 if (sort__need_collapse)
1347 n = hists->entries_collapsed.rb_node;
1348 else
1349 n = hists->entries_in->rb_node;
95529be4
ACM
1350
1351 while (n) {
ce74f60e
NK
1352 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
1353 int64_t cmp = hist_entry__collapse(iter, he);
95529be4
ACM
1354
1355 if (cmp < 0)
1356 n = n->rb_left;
1357 else if (cmp > 0)
1358 n = n->rb_right;
1359 else
1360 return iter;
1361 }
1362
1363 return NULL;
1364}
1365
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	/* Walk the same tree the entries actually live in. */
	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
494d70a1
ACM
1388
/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		/* Only entries not already paired by hists__match() need a dummy. */
		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
f2148330 1418
/*
 * Print the per-evsel event statistics of @evlist to @fp, returning the
 * total number of characters written.
 */
size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}
1432
1433
f2148330
NK
1434u64 hists__total_period(struct hists *hists)
1435{
1436 return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
1437 hists->stats.total_period;
1438}
33db4568
NK
1439
1440int parse_filter_percentage(const struct option *opt __maybe_unused,
1441 const char *arg, int unset __maybe_unused)
1442{
1443 if (!strcmp(arg, "relative"))
1444 symbol_conf.filter_relative = true;
1445 else if (!strcmp(arg, "absolute"))
1446 symbol_conf.filter_relative = false;
1447 else
1448 return -1;
1449
1450 return 0;
1451}
0b93da17
NK
1452
/*
 * Config-file hook: honour "hist.percentage" by reusing the command-line
 * parser; all other variables are ignored (returns 0).
 */
int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
a635fc51
ACM
1460
/*
 * Per-evsel constructor: zero the embedded hists and initialize its
 * rbtree roots and lock.  Always returns 0.
 */
static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	memset(hists, 0, sizeof(*hists));
	/* Two input trees are rotated by hists__get_rotate_entries_in(). */
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	return 0;
}
1473
/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

/*
 * Register the hists-aware evsel subclass so every new evsel carries an
 * initialized struct hists.  Returns the perf_evsel__object_config()
 * result; prints a fatal diagnostic on failure.
 */
int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init, NULL);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}