]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - tools/perf/util/hist.c
perf report: Make max_stack value allow for synthesized callchains
[mirror_ubuntu-artful-kernel.git] / tools / perf / util / hist.c
CommitLineData
8a0ecfb8 1#include "util.h"
598357eb 2#include "build-id.h"
3d1d07ec 3#include "hist.h"
4e4f06e4
ACM
4#include "session.h"
5#include "sort.h"
2a1731fb 6#include "evlist.h"
29d720ed 7#include "evsel.h"
69bcb019 8#include "annotate.h"
740b97f9 9#include "ui/progress.h"
9b33827d 10#include <math.h>
3d1d07ec 11
90cf1fb5
ACM
12static bool hists__filter_entry_by_dso(struct hists *hists,
13 struct hist_entry *he);
14static bool hists__filter_entry_by_thread(struct hists *hists,
15 struct hist_entry *he);
e94d53eb
NK
16static bool hists__filter_entry_by_symbol(struct hists *hists,
17 struct hist_entry *he);
21394d94
KL
18static bool hists__filter_entry_by_socket(struct hists *hists,
19 struct hist_entry *he);
90cf1fb5 20
/* Return the current display width of column @col in @hists. */
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}
25
/* Unconditionally set the display width of column @col in @hists. */
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
30
42b28ac0 31bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
8a6c5b26 32{
42b28ac0
ACM
33 if (len > hists__col_len(hists, col)) {
34 hists__set_col_len(hists, col, len);
8a6c5b26
ACM
35 return true;
36 }
37 return false;
38}
39
7ccf4f90 40void hists__reset_col_len(struct hists *hists)
8a6c5b26
ACM
41{
42 enum hist_column col;
43
44 for (col = 0; col < HISTC_NR_COLS; ++col)
42b28ac0 45 hists__set_col_len(hists, col, 0);
8a6c5b26
ACM
46}
47
/*
 * Widen a DSO column to the fixed "unresolved address" width, but only when
 * the user has not constrained column widths/separators or a dso filter list.
 */
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
57
/*
 * Grow the per-column display widths of @hists so that entry @h fits:
 * symbol/dso/comm columns from the entry's resolved names, branch and mem
 * columns when the corresponding info is present, plus fixed minimum widths
 * for the numeric columns.
 */
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	/* +6 leaves room for the "pid:" decoration of the thread column */
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	/* Fixed minimum widths for numeric/flag columns. */
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}
168
7ccf4f90
NK
169void hists__output_recalc_col_len(struct hists *hists, int max_rows)
170{
171 struct rb_node *next = rb_first(&hists->entries);
172 struct hist_entry *n;
173 int row = 0;
174
175 hists__reset_col_len(hists);
176
177 while (next && row++ < max_rows) {
178 n = rb_entry(next, struct hist_entry, rb_node);
179 if (!n->filtered)
180 hists__calc_col_len(hists, n);
181 next = rb_next(&n->rb_node);
182 }
183}
184
/*
 * Attribute @period to the privilege-level bucket (sys/user/guest-sys/
 * guest-user) matching the sample's @cpumode.  Unknown modes are ignored.
 */
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}
205
/* Accumulate one sample's period and weight, and bump the event count. */
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}
214
/* Merge all counters of @src into @dest (used when collapsing entries). */
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}
225
/* Exponentially decay the counters: keep 7/8 of the previous value. */
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
232
/*
 * Decay @he's counters and subtract the lost period from the hists totals.
 * Returns true when the entry's period reached zero (i.e. it can be removed).
 */
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}
253
/*
 * Unlink @he from the output tree (and the collapsed tree when collapsing is
 * in use), fix up the entry counters, and free the entry.
 */
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}
267
3a5714f8 268void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
ab81f3fd
ACM
269{
270 struct rb_node *next = rb_first(&hists->entries);
271 struct hist_entry *n;
272
273 while (next) {
274 n = rb_entry(next, struct hist_entry, rb_node);
275 next = rb_next(&n->rb_node);
b079d4e9
ACM
276 if (((zap_user && n->level == '.') ||
277 (zap_kernel && n->level != '.') ||
4c47f4fc 278 hists__decay_entry(hists, n))) {
956b65e1 279 hists__delete_entry(hists, n);
ab81f3fd
ACM
280 }
281 }
282}
283
701937bd
NK
284void hists__delete_entries(struct hists *hists)
285{
286 struct rb_node *next = rb_first(&hists->entries);
287 struct hist_entry *n;
288
289 while (next) {
290 n = rb_entry(next, struct hist_entry, rb_node);
291 next = rb_next(&n->rb_node);
292
956b65e1 293 hists__delete_entry(hists, n);
701937bd
NK
294 }
295}
296
3d1d07ec 297/*
c82ee828 298 * histogram, sorted on item, collects periods
3d1d07ec
JK
299 */
300
/*
 * Allocate a new hist_entry as a copy of @template, taking references on the
 * maps/thread it points to and deep-copying the branch info.  The callchain
 * root is co-allocated after the struct when callchains are enabled.  When
 * @sample_self is false (cumulative mode child entry) the direct stats are
 * zeroed and only stat_acc keeps the template's counts.  Returns NULL on
 * allocation failure.
 */
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		map__get(he->ms.map);

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries. So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				/* undo everything acquired so far */
				map__zput(he->ms.map);
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			map__get(he->branch_info->from.map);
			map__get(he->branch_info->to.map);
		}

		if (he->mem_info) {
			map__get(he->mem_info->iaddr.map);
			map__get(he->mem_info->daddr.map);
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);
	}

	return he;
}
363
7a007ca9
ACM
364static u8 symbol__parent_filter(const struct symbol *parent)
365{
366 if (symbol_conf.exclude_other && parent == NULL)
367 return 1 << HIST_FILTER__PARENT;
368 return 0;
369}
370
/*
 * Look up @entry in the current input tree; if a matching entry exists, fold
 * the sample's period/weight into it, otherwise allocate and insert a new
 * entry via hist_entry__new().  In both cases the cpumode bucket counters are
 * updated before returning.  Returns NULL on allocation failure.
 */
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it. Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
443
/*
 * Build a template hist_entry from the resolved @al location plus the
 * optional branch/mem info and sample values, then find-or-create the real
 * entry via hists__findnew_entry().  Ownership note: @bi and @mi are borrowed
 * here; hists__findnew_entry()/hist_entry__new() decide whether they are
 * copied or freed.
 */
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.socket = al->socket,
		.cpu = al->cpu,
		.cpumode = al->cpumode,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.nr_events = 1,
			.period = period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists = hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}
479
/* No-op ->next_entry() callback: single-entry iterators have nothing more. */
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}
486
/* No-op ->add_next_entry() callback for single-entry iterators. */
static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}
493
494static int
495iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
496{
497 struct perf_sample *sample = iter->sample;
498 struct mem_info *mi;
499
500 mi = sample__resolve_mem(sample, al);
501 if (mi == NULL)
502 return -ENOMEM;
503
504 iter->priv = mi;
505 return 0;
506}
507
/*
 * Add one hist entry for a memory sample; the entry's period is the sample
 * weight (minimum 1) so sorting reflects total access cost.
 */
static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
538
/*
 * Finish a memory-sample iteration: bump sample counters, append the
 * callchain, and drop our reference to the mem_info (its memory is owned
 * elsewhere by now — see comment below).
 */
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}
566
567static int
568iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
569{
570 struct branch_info *bi;
571 struct perf_sample *sample = iter->sample;
572
573 bi = sample__resolve_bstack(sample, al);
574 if (!bi)
575 return -ENOMEM;
576
577 iter->curr = 0;
578 iter->total = sample->branch_stack->nr;
579
580 iter->priv = bi;
581 return 0;
582}
583
584static int
585iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
586 struct addr_location *al __maybe_unused)
587{
9d3c02d7
NK
588 /* to avoid calling callback function */
589 iter->he = NULL;
590
69bcb019
NK
591 return 0;
592}
593
594static int
595iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
596{
597 struct branch_info *bi = iter->priv;
598 int i = iter->curr;
599
600 if (bi == NULL)
601 return 0;
602
603 if (iter->curr >= iter->total)
604 return 0;
605
606 al->map = bi[i].to.map;
607 al->sym = bi[i].to.sym;
608 al->addr = bi[i].to.addr;
609 return 1;
610}
611
612static int
613iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
614{
9d3c02d7 615 struct branch_info *bi;
69bcb019 616 struct perf_evsel *evsel = iter->evsel;
4ea062ed 617 struct hists *hists = evsel__hists(evsel);
69bcb019
NK
618 struct hist_entry *he = NULL;
619 int i = iter->curr;
620 int err = 0;
621
622 bi = iter->priv;
623
624 if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
625 goto out;
626
627 /*
628 * The report shows the percentage of total branches captured
629 * and not events sampled. Thus we use a pseudo period of 1.
630 */
4ea062ed 631 he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
0e332f03
AK
632 1, bi->flags.cycles ? bi->flags.cycles : 1,
633 0, true);
69bcb019
NK
634 if (he == NULL)
635 return -ENOMEM;
636
4ea062ed 637 hists__inc_nr_samples(hists, he->filtered);
69bcb019
NK
638
639out:
640 iter->he = he;
641 iter->curr++;
642 return err;
643}
644
645static int
646iter_finish_branch_entry(struct hist_entry_iter *iter,
647 struct addr_location *al __maybe_unused)
648{
649 zfree(&iter->priv);
650 iter->he = NULL;
651
652 return iter->curr >= iter->total ? 0 : -1;
653}
654
/* Normal samples need no per-iteration setup. */
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}
661
/* Add one hist entry for a plain (non-branch, non-mem) sample. */
static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
678
/*
 * Finish a normal sample: count it and append the sample's callchain to the
 * entry created in iter_add_single_normal_entry().
 */
static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
696
7a13aa28
NK
697static int
698iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused,
699 struct addr_location *al __maybe_unused)
700{
b4d3c8bd
NK
701 struct hist_entry **he_cache;
702
7a13aa28 703 callchain_cursor_commit(&callchain_cursor);
b4d3c8bd
NK
704
705 /*
706 * This is for detecting cycles or recursions so that they're
707 * cumulated only one time to prevent entries more than 100%
708 * overhead.
709 */
710 he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
711 if (he_cache == NULL)
712 return -ENOMEM;
713
714 iter->priv = he_cache;
715 iter->curr = 0;
716
7a13aa28
NK
717 return 0;
718}
719
/*
 * Add the "self" entry for a cumulative sample and remember it in the cache
 * so ancestors in the callchain are not double-counted later.
 */
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}
752
/*
 * Move @al to the current callchain cursor node.  Returns 0 when the chain
 * is exhausted, otherwise whatever fill_callchain_info() reports.
 */
static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}
765
/*
 * Add a cumulative ("children") entry for one callchain ancestor, skipping
 * locations already counted in this sample (cycle/recursion protection via
 * the he_cache).  The new entry gets the callchain from the snapshotted
 * cursor position, i.e. the chain as seen from this frame.
 */
static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	/* remember the current position before advancing past this node */
	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}
818
819static int
820iter_finish_cumulative_entry(struct hist_entry_iter *iter,
821 struct addr_location *al __maybe_unused)
822{
b4d3c8bd 823 zfree(&iter->priv);
7a13aa28 824 iter->he = NULL;
b4d3c8bd 825
7a13aa28
NK
826 return 0;
827}
828
/* Iterator callbacks for memory (mem-load/store) samples. */
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry 		= iter_prepare_mem_entry,
	.add_single_entry 	= iter_add_single_mem_entry,
	.next_entry 		= iter_next_nop_entry,
	.add_next_entry 	= iter_add_next_nop_entry,
	.finish_entry 		= iter_finish_mem_entry,
};

/* Iterator callbacks for branch-stack samples (one entry per branch). */
const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry 		= iter_prepare_branch_entry,
	.add_single_entry 	= iter_add_single_branch_entry,
	.next_entry 		= iter_next_branch_entry,
	.add_next_entry 	= iter_add_next_branch_entry,
	.finish_entry 		= iter_finish_branch_entry,
};

/* Iterator callbacks for plain samples. */
const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry 		= iter_prepare_normal_entry,
	.add_single_entry 	= iter_add_single_normal_entry,
	.next_entry 		= iter_next_nop_entry,
	.add_next_entry 	= iter_add_next_nop_entry,
	.finish_entry 		= iter_finish_normal_entry,
};

/* Iterator callbacks for cumulative ("children") accounting. */
const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry 		= iter_prepare_cumulative_entry,
	.add_single_entry 	= iter_add_single_cumulative_entry,
	.next_entry 		= iter_next_cumulative_entry,
	.add_next_entry 	= iter_add_next_cumulative_entry,
	.finish_entry 		= iter_finish_cumulative_entry,
};
860
/*
 * Drive one sample through its hist_iter_ops: resolve the callchain, add the
 * single entry, then loop over any additional entries (branches, callchain
 * ancestors), invoking add_entry_cb after each added entry.  finish_entry()
 * always runs; its error is reported only if nothing failed earlier.
 */
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}
904
3d1d07ec
JK
905int64_t
906hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
907{
093f0ef3 908 struct perf_hpp_fmt *fmt;
3d1d07ec
JK
909 int64_t cmp = 0;
910
093f0ef3 911 perf_hpp__for_each_sort_list(fmt) {
e67d49a7
NK
912 if (perf_hpp__should_skip(fmt))
913 continue;
914
87bbdf76 915 cmp = fmt->cmp(fmt, left, right);
3d1d07ec
JK
916 if (cmp)
917 break;
918 }
919
920 return cmp;
921}
922
923int64_t
924hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
925{
093f0ef3 926 struct perf_hpp_fmt *fmt;
3d1d07ec
JK
927 int64_t cmp = 0;
928
093f0ef3 929 perf_hpp__for_each_sort_list(fmt) {
e67d49a7
NK
930 if (perf_hpp__should_skip(fmt))
931 continue;
932
87bbdf76 933 cmp = fmt->collapse(fmt, left, right);
3d1d07ec
JK
934 if (cmp)
935 break;
936 }
937
938 return cmp;
939}
940
/*
 * Release everything a hist_entry owns — thread/map references, the copied
 * branch/mem info, accumulated stats, srcline/srcfile strings and the
 * callchain — then free the entry itself.
 */
void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	/*
	 * NOTE(review): only non-empty srcfile is freed — presumably an empty
	 * string here is shared/static storage; confirm against srcfile
	 * producer before changing this to an unconditional free.
	 */
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he);
}
965
966/*
967 * collapse the histogram
968 */
969
1d037ca1 970static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
1b3a0e95
FW
971 struct rb_root *root,
972 struct hist_entry *he)
3d1d07ec 973{
b9bf0892 974 struct rb_node **p = &root->rb_node;
3d1d07ec
JK
975 struct rb_node *parent = NULL;
976 struct hist_entry *iter;
977 int64_t cmp;
978
979 while (*p != NULL) {
980 parent = *p;
1980c2eb 981 iter = rb_entry(parent, struct hist_entry, rb_node_in);
3d1d07ec
JK
982
983 cmp = hist_entry__collapse(iter, he);
984
985 if (!cmp) {
139c0815 986 he_stat__add_stat(&iter->stat, &he->stat);
f8be1c8c
NK
987 if (symbol_conf.cumulate_callchain)
988 he_stat__add_stat(iter->stat_acc, he->stat_acc);
9ec60972 989
1b3a0e95 990 if (symbol_conf.use_callchain) {
47260645
NK
991 callchain_cursor_reset(&callchain_cursor);
992 callchain_merge(&callchain_cursor,
993 iter->callchain,
1b3a0e95
FW
994 he->callchain);
995 }
6733d1bf 996 hist_entry__delete(he);
fefb0b94 997 return false;
3d1d07ec
JK
998 }
999
1000 if (cmp < 0)
1001 p = &(*p)->rb_left;
1002 else
1003 p = &(*p)->rb_right;
1004 }
740b97f9 1005 hists->nr_entries++;
3d1d07ec 1006
1980c2eb
ACM
1007 rb_link_node(&he->rb_node_in, parent, p);
1008 rb_insert_color(&he->rb_node_in, root);
fefb0b94 1009 return true;
3d1d07ec
JK
1010}
1011
/*
 * Atomically hand out the current input tree and rotate to the other one of
 * the two entries_in_array roots, so new samples land in a fresh tree while
 * the returned one is being collapsed.
 */
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
1026
/* Re-evaluate every active filter (dso/thread/symbol/socket) against @he. */
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}
1034
/*
 * Move all entries from the (rotated-out) input tree into the collapsed
 * tree, merging equivalent entries.  Filters are re-applied to entries that
 * survive as distinct nodes.  @prog, when non-NULL, gets one tick per entry;
 * the walk aborts early if the session was interrupted.
 */
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}
b9bf0892 1069
043ca389 1070static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
29d720ed 1071{
043ca389
NK
1072 struct perf_hpp_fmt *fmt;
1073 int64_t cmp = 0;
29d720ed 1074
26d8b338 1075 perf_hpp__for_each_sort_list(fmt) {
e67d49a7
NK
1076 if (perf_hpp__should_skip(fmt))
1077 continue;
1078
87bbdf76 1079 cmp = fmt->sort(fmt, a, b);
043ca389 1080 if (cmp)
29d720ed
NK
1081 break;
1082 }
1083
043ca389 1084 return cmp;
29d720ed
NK
1085}
1086
9283ba9b
NK
1087static void hists__reset_filter_stats(struct hists *hists)
1088{
1089 hists->nr_non_filtered_entries = 0;
1090 hists->stats.total_non_filtered_period = 0;
1091}
1092
1093void hists__reset_stats(struct hists *hists)
1094{
1095 hists->nr_entries = 0;
1096 hists->stats.total_period = 0;
1097
1098 hists__reset_filter_stats(hists);
1099}
1100
1101static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1102{
1103 hists->nr_non_filtered_entries++;
1104 hists->stats.total_non_filtered_period += h->stat.period;
1105}
1106
1107void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1108{
1109 if (!h->filtered)
1110 hists__inc_filter_stats(hists, h);
1111
1112 hists->nr_entries++;
1113 hists->stats.total_period += h->stat.period;
1114}
1115
1c02c4d2
ACM
1116static void __hists__insert_output_entry(struct rb_root *entries,
1117 struct hist_entry *he,
f9db0d0f
KL
1118 u64 min_callchain_hits,
1119 bool use_callchain)
3d1d07ec 1120{
1c02c4d2 1121 struct rb_node **p = &entries->rb_node;
3d1d07ec
JK
1122 struct rb_node *parent = NULL;
1123 struct hist_entry *iter;
1124
f9db0d0f 1125 if (use_callchain)
b9fb9304 1126 callchain_param.sort(&he->sorted_chain, he->callchain,
3d1d07ec
JK
1127 min_callchain_hits, &callchain_param);
1128
1129 while (*p != NULL) {
1130 parent = *p;
1131 iter = rb_entry(parent, struct hist_entry, rb_node);
1132
043ca389 1133 if (hist_entry__sort(he, iter) > 0)
3d1d07ec
JK
1134 p = &(*p)->rb_left;
1135 else
1136 p = &(*p)->rb_right;
1137 }
1138
1139 rb_link_node(&he->rb_node, parent, p);
1c02c4d2 1140 rb_insert_color(&he->rb_node, entries);
3d1d07ec
JK
1141}
1142
/*
 * Rebuild hists->entries (the rb_tree the UIs display) from either the
 * collapsed tree or the current input tree, resolving callchains and
 * recomputing entry/period stats and column widths along the way.
 * @prog: optional UI progress bar, advanced once per entry.
 */
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	bool use_callchain;

	/*
	 * NOTE(review): when show_ref_callgraph is set, the global
	 * use_callchain flag is honoured instead of this evsel's
	 * sample_type — presumably because the callchains then come from
	 * a different event; confirm against the callers setting it.
	 */
	if (evsel && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
	else
		use_callchain = symbol_conf.use_callchain;

	/* Threshold below which callchain branches get pruned. */
	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		/* Only unfiltered entries contribute to column widths. */
		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}
b9bf0892 1184
42b28ac0 1185static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
cc5edb0e
ACM
1186 enum hist_filter filter)
1187{
1188 h->filtered &= ~(1 << filter);
1189 if (h->filtered)
1190 return;
1191
87e90f43 1192 /* force fold unfiltered entry for simplicity */
3698dab1 1193 h->unfolded = false;
0f0cbf7a 1194 h->row_offset = 0;
a8cd1f43 1195 h->nr_rows = 0;
9283ba9b 1196
1ab1fa5d 1197 hists->stats.nr_non_filtered_samples += h->stat.nr_events;
cc5edb0e 1198
9283ba9b 1199 hists__inc_filter_stats(hists, h);
42b28ac0 1200 hists__calc_col_len(hists, h);
cc5edb0e
ACM
1201}
1202
90cf1fb5
ACM
1203
1204static bool hists__filter_entry_by_dso(struct hists *hists,
1205 struct hist_entry *he)
1206{
1207 if (hists->dso_filter != NULL &&
1208 (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
1209 he->filtered |= (1 << HIST_FILTER__DSO);
1210 return true;
1211 }
1212
1213 return false;
1214}
1215
d7b76f09 1216void hists__filter_by_dso(struct hists *hists)
b09e0190
ACM
1217{
1218 struct rb_node *nd;
1219
1ab1fa5d 1220 hists->stats.nr_non_filtered_samples = 0;
9283ba9b
NK
1221
1222 hists__reset_filter_stats(hists);
42b28ac0 1223 hists__reset_col_len(hists);
b09e0190 1224
42b28ac0 1225 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
b09e0190
ACM
1226 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1227
1228 if (symbol_conf.exclude_other && !h->parent)
1229 continue;
1230
90cf1fb5 1231 if (hists__filter_entry_by_dso(hists, h))
b09e0190 1232 continue;
b09e0190 1233
42b28ac0 1234 hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
b09e0190
ACM
1235 }
1236}
1237
90cf1fb5
ACM
1238static bool hists__filter_entry_by_thread(struct hists *hists,
1239 struct hist_entry *he)
1240{
1241 if (hists->thread_filter != NULL &&
1242 he->thread != hists->thread_filter) {
1243 he->filtered |= (1 << HIST_FILTER__THREAD);
1244 return true;
1245 }
1246
1247 return false;
1248}
1249
d7b76f09 1250void hists__filter_by_thread(struct hists *hists)
b09e0190
ACM
1251{
1252 struct rb_node *nd;
1253
1ab1fa5d 1254 hists->stats.nr_non_filtered_samples = 0;
9283ba9b
NK
1255
1256 hists__reset_filter_stats(hists);
42b28ac0 1257 hists__reset_col_len(hists);
b09e0190 1258
42b28ac0 1259 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
b09e0190
ACM
1260 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1261
90cf1fb5 1262 if (hists__filter_entry_by_thread(hists, h))
b09e0190 1263 continue;
cc5edb0e 1264
42b28ac0 1265 hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
b09e0190
ACM
1266 }
1267}
ef7b93a1 1268
e94d53eb
NK
1269static bool hists__filter_entry_by_symbol(struct hists *hists,
1270 struct hist_entry *he)
1271{
1272 if (hists->symbol_filter_str != NULL &&
1273 (!he->ms.sym || strstr(he->ms.sym->name,
1274 hists->symbol_filter_str) == NULL)) {
1275 he->filtered |= (1 << HIST_FILTER__SYMBOL);
1276 return true;
1277 }
1278
1279 return false;
1280}
1281
1282void hists__filter_by_symbol(struct hists *hists)
1283{
1284 struct rb_node *nd;
1285
1ab1fa5d 1286 hists->stats.nr_non_filtered_samples = 0;
9283ba9b
NK
1287
1288 hists__reset_filter_stats(hists);
e94d53eb
NK
1289 hists__reset_col_len(hists);
1290
1291 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1292 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1293
1294 if (hists__filter_entry_by_symbol(hists, h))
1295 continue;
1296
1297 hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
1298 }
1299}
1300
21394d94
KL
1301static bool hists__filter_entry_by_socket(struct hists *hists,
1302 struct hist_entry *he)
1303{
1304 if ((hists->socket_filter > -1) &&
1305 (he->socket != hists->socket_filter)) {
1306 he->filtered |= (1 << HIST_FILTER__SOCKET);
1307 return true;
1308 }
1309
1310 return false;
1311}
1312
84734b06
KL
1313void hists__filter_by_socket(struct hists *hists)
1314{
1315 struct rb_node *nd;
1316
1317 hists->stats.nr_non_filtered_samples = 0;
1318
1319 hists__reset_filter_stats(hists);
1320 hists__reset_col_len(hists);
1321
1322 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1323 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1324
1325 if (hists__filter_entry_by_socket(hists, h))
1326 continue;
1327
1328 hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET);
1329 }
1330}
1331
28a6b6aa
ACM
1332void events_stats__inc(struct events_stats *stats, u32 type)
1333{
1334 ++stats->nr_events[0];
1335 ++stats->nr_events[type];
1336}
1337
42b28ac0 1338void hists__inc_nr_events(struct hists *hists, u32 type)
c8446b9b 1339{
28a6b6aa 1340 events_stats__inc(&hists->stats, type);
c8446b9b 1341}
95529be4 1342
1844dbcb
NK
1343void hists__inc_nr_samples(struct hists *hists, bool filtered)
1344{
1345 events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
1346 if (!filtered)
1347 hists->stats.nr_non_filtered_samples++;
1348}
1349
/*
 * Insert a zero-stat clone of @pair into @hists so that hists__link()
 * has a leader-side bucket to pair it with.
 * Returns the pre-existing entry when one collapses to the same key,
 * the newly inserted dummy on success, or NULL on allocation failure.
 */
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	/* Insert into whichever tree hists__find_entry() will search. */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		/* An equal entry already exists; return it instead. */
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		/* Dummies carry no period/events of their own. */
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}
1393
95529be4
ACM
1394static struct hist_entry *hists__find_entry(struct hists *hists,
1395 struct hist_entry *he)
1396{
ce74f60e
NK
1397 struct rb_node *n;
1398
1399 if (sort__need_collapse)
1400 n = hists->entries_collapsed.rb_node;
1401 else
1402 n = hists->entries_in->rb_node;
95529be4
ACM
1403
1404 while (n) {
ce74f60e
NK
1405 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
1406 int64_t cmp = hist_entry__collapse(iter, he);
95529be4
ACM
1407
1408 if (cmp < 0)
1409 n = n->rb_left;
1410 else if (cmp > 0)
1411 n = n->rb_right;
1412 else
1413 return iter;
1414 }
1415
1416 return NULL;
1417}
1418
1419/*
1420 * Look for pairs to link to the leader buckets (hist_entries):
1421 */
1422void hists__match(struct hists *leader, struct hists *other)
1423{
ce74f60e 1424 struct rb_root *root;
95529be4
ACM
1425 struct rb_node *nd;
1426 struct hist_entry *pos, *pair;
1427
ce74f60e
NK
1428 if (sort__need_collapse)
1429 root = &leader->entries_collapsed;
1430 else
1431 root = leader->entries_in;
1432
1433 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
1434 pos = rb_entry(nd, struct hist_entry, rb_node_in);
95529be4
ACM
1435 pair = hists__find_entry(other, pos);
1436
1437 if (pair)
5fa9041b 1438 hist_entry__add_pair(pair, pos);
95529be4
ACM
1439 }
1440}
494d70a1
ACM
1441
1442/*
1443 * Look for entries in the other hists that are not present in the leader, if
1444 * we find them, just add a dummy entry on the leader hists, with period=0,
1445 * nr_events=0, to serve as the list header.
1446 */
1447int hists__link(struct hists *leader, struct hists *other)
1448{
ce74f60e 1449 struct rb_root *root;
494d70a1
ACM
1450 struct rb_node *nd;
1451 struct hist_entry *pos, *pair;
1452
ce74f60e
NK
1453 if (sort__need_collapse)
1454 root = &other->entries_collapsed;
1455 else
1456 root = other->entries_in;
1457
1458 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
1459 pos = rb_entry(nd, struct hist_entry, rb_node_in);
494d70a1
ACM
1460
1461 if (!hist_entry__has_pairs(pos)) {
1462 pair = hists__add_dummy_entry(leader, pos);
1463 if (pair == NULL)
1464 return -1;
5fa9041b 1465 hist_entry__add_pair(pos, pair);
494d70a1
ACM
1466 }
1467 }
1468
1469 return 0;
1470}
f2148330 1471
/*
 * Resolve the sampled branch stack into branch_info entries and feed
 * per-branch cycle counts into addr_map_symbol__account_cycles().
 * Only does anything when the first branch entry carries a cycles count.
 * @nonany_branch_mode: when true, pass a NULL "previous" target for
 * every branch, which per the comment below suppresses IPC computation.
 */
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			/* bi was allocated by sample__resolve_bstack(). */
			free(bi);
		}
	}
}
2a1731fb
ACM
1505
1506size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
1507{
1508 struct perf_evsel *pos;
1509 size_t ret = 0;
1510
1511 evlist__for_each(evlist, pos) {
1512 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
1513 ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
1514 }
1515
1516 return ret;
1517}
1518
1519
f2148330
NK
1520u64 hists__total_period(struct hists *hists)
1521{
1522 return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
1523 hists->stats.total_period;
1524}
33db4568
NK
1525
1526int parse_filter_percentage(const struct option *opt __maybe_unused,
1527 const char *arg, int unset __maybe_unused)
1528{
1529 if (!strcmp(arg, "relative"))
1530 symbol_conf.filter_relative = true;
1531 else if (!strcmp(arg, "absolute"))
1532 symbol_conf.filter_relative = false;
1533 else
1534 return -1;
1535
1536 return 0;
1537}
/*
 * perf-config callback for the "hist." section; currently only
 * "hist.percentage" is recognized.  Unknown keys are ignored (0).
 */
int perf_hist_config(const char *var, const char *value)
{
	if (strcmp(var, "hist.percentage") == 0)
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
a635fc51
ACM
1546
1547static int hists_evsel__init(struct perf_evsel *evsel)
1548{
1549 struct hists *hists = evsel__hists(evsel);
1550
1551 memset(hists, 0, sizeof(*hists));
1552 hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
1553 hists->entries_in = &hists->entries_in_array[0];
1554 hists->entries_collapsed = RB_ROOT;
1555 hists->entries = RB_ROOT;
1556 pthread_mutex_init(&hists->lock, NULL);
21394d94 1557 hists->socket_filter = -1;
a635fc51
ACM
1558 return 0;
1559}
1560
1561/*
1562 * XXX We probably need a hists_evsel__exit() to free the hist_entries
1563 * stored in the rbtree...
1564 */
1565
1566int hists__init(void)
1567{
1568 int err = perf_evsel__object_config(sizeof(struct hists_evsel),
1569 hists_evsel__init, NULL);
1570 if (err)
1571 fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
1572
1573 return err;
1574}