#include "namespaces.h"
#include "ui/progress.h"
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}
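/*
 * Illustrative note (added comment, not from the original source): the
 * helpers above keep a running per-column maximum width.  A caller that
 * prints a comm string would typically grow the column first and read the
 * width back later, e.g. (hypothetical snippet):
 *
 *	hists__new_col_len(hists, HISTC_COMM, strlen(comm));
 *	...
 *	width = hists__col_len(hists, HISTC_COMM);
 */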
void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}
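/*
 * Illustrative summary (added comment, not from the original source): the
 * cpumode of each sample selects which he_stat bucket the period is
 * accounted to, roughly:
 *
 *	PERF_RECORD_MISC_KERNEL       -> period_sys
 *	PERF_RECORD_MISC_USER         -> period_us
 *	PERF_RECORD_MISC_GUEST_KERNEL -> period_guest_sys
 *	PERF_RECORD_MISC_GUEST_USER   -> period_guest_us
 *
 * which is what the switch above implements.
 */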
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
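/*
 * Illustrative note (added comment): he_stat__decay() above ages an entry
 * by multiplying its period by 7/8 on every decay pass, so with integer
 * arithmetic a period of 100 shrinks roughly as
 *
 *	100 -> 87 -> 76 -> 66 -> 57 -> ...
 *
 * Entries whose period eventually reaches 0 are reported as dead by
 * hists__decay_entry() below and removed via hists__delete_entry().
 */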
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first(&he->hroot_out);

		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root *root_in;
	struct rb_root *root_out;

	if (he->parent_he) {
		root_in  = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase(&he->rb_node_in, root_in);
	rb_erase(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}
void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}
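/*
 * Illustrative note (added comment): both loops above use the usual
 * "erase while iterating" rbtree pattern, i.e. the next node is fetched
 * with rb_next() *before* the current entry may be deleted, so the
 * iterator is never left pointing at freed memory.
 */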
/*
 * histogram, sorted on item, collects periods
 */
static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self)
{
	*he = *template;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries.  So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL) {
			map__zput(he->ms.map);
			free(he->stat_acc);
			return -ENOMEM;
		}

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		map__get(he->branch_info->from.map);
		map__get(he->branch_info->to.map);
	}

	if (he->mem_info) {
		map__get(he->mem_info->iaddr.map);
		map__get(he->mem_info->daddr.map);
	}

	if (symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);

		if (he->raw_data == NULL) {
			map__put(he->ms.map);
			if (he->branch_info) {
				map__put(he->branch_info->from.map);
				map__put(he->branch_info->to.map);
				free(he->branch_info);
			}
			if (he->mem_info) {
				map__put(he->mem_info->iaddr.map);
				map__put(he->mem_info->daddr.map);
			}
			free(he->stat_acc);
			return -ENOMEM;
		}
	}
	INIT_LIST_HEAD(&he->pairs.node);
	thread__get(he->thread);
	he->hroot_in  = RB_ROOT;
	he->hroot_out = RB_ROOT;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;
}
static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}

	return he;
}
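/*
 * Illustrative note (added comment): hist_entry_ops lets a tool override
 * how entries are allocated and freed.  A hypothetical user could define
 *
 *	static struct hist_entry_ops my_ops = {
 *		.new	= my_zalloc,	/- hypothetical allocator -/
 *		.free	= my_free,	/- hypothetical destructor -/
 *	};
 *
 * and pass it through hists__add_entry_ops(); when template->ops is NULL,
 * hist_entry__new() falls back to default_ops above.
 */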
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period, weight);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
static struct hist_entry *
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}
struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi,
				  sample, sample_self, ops);
}
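/*
 * Illustrative usage sketch (added comment, hypothetical caller): adding
 * one resolved sample to a hists instance typically looks like
 *
 *	struct hist_entry *he;
 *
 *	he = hists__add_entry(hists, &al, parent_sym, NULL, NULL,
 *			      &sample, true);
 *	if (he == NULL)
 *		return -ENOMEM;
 *	hists__inc_nr_samples(hists, he->filtered);
 *
 * which is essentially what the iter_add_single_*_entry() callbacks below
 * do for each iteration mode.
 */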
641 iter_next_nop_entry(struct hist_entry_iter
*iter __maybe_unused
,
642 struct addr_location
*al __maybe_unused
)
648 iter_add_next_nop_entry(struct hist_entry_iter
*iter __maybe_unused
,
649 struct addr_location
*al __maybe_unused
)
655 iter_prepare_mem_entry(struct hist_entry_iter
*iter
, struct addr_location
*al
)
657 struct perf_sample
*sample
= iter
->sample
;
660 mi
= sample__resolve_mem(sample
, al
);
669 iter_add_single_mem_entry(struct hist_entry_iter
*iter
, struct addr_location
*al
)
672 struct mem_info
*mi
= iter
->priv
;
673 struct hists
*hists
= evsel__hists(iter
->evsel
);
674 struct perf_sample
*sample
= iter
->sample
;
675 struct hist_entry
*he
;
680 cost
= sample
->weight
;
685 * must pass period=weight in order to get the correct
686 * sorting from hists__collapse_resort() which is solely
687 * based on periods. We want sorting be done on nr_events * weight
688 * and this is indirectly achieved by passing period=weight here
689 * and the he_stat__add_period() function.
691 sample
->period
= cost
;
693 he
= hists__add_entry(hists
, al
, iter
->parent
, NULL
, mi
,
703 iter_finish_mem_entry(struct hist_entry_iter
*iter
,
704 struct addr_location
*al __maybe_unused
)
706 struct perf_evsel
*evsel
= iter
->evsel
;
707 struct hists
*hists
= evsel__hists(evsel
);
708 struct hist_entry
*he
= iter
->he
;
714 hists__inc_nr_samples(hists
, he
->filtered
);
716 err
= hist_entry__append_callchain(he
, iter
->sample
);
720 * We don't need to free iter->priv (mem_info) here since the mem info
721 * was either already freed in hists__findnew_entry() or passed to a
722 * new hist entry by hist_entry__new().
731 iter_prepare_branch_entry(struct hist_entry_iter
*iter
, struct addr_location
*al
)
733 struct branch_info
*bi
;
734 struct perf_sample
*sample
= iter
->sample
;
736 bi
= sample__resolve_bstack(sample
, al
);
741 iter
->total
= sample
->branch_stack
->nr
;
748 iter_add_single_branch_entry(struct hist_entry_iter
*iter
,
749 struct addr_location
*al __maybe_unused
)
751 /* to avoid calling callback function */
758 iter_next_branch_entry(struct hist_entry_iter
*iter
, struct addr_location
*al
)
760 struct branch_info
*bi
= iter
->priv
;
766 if (iter
->curr
>= iter
->total
)
769 al
->map
= bi
[i
].to
.map
;
770 al
->sym
= bi
[i
].to
.sym
;
771 al
->addr
= bi
[i
].to
.addr
;
776 iter_add_next_branch_entry(struct hist_entry_iter
*iter
, struct addr_location
*al
)
778 struct branch_info
*bi
;
779 struct perf_evsel
*evsel
= iter
->evsel
;
780 struct hists
*hists
= evsel__hists(evsel
);
781 struct perf_sample
*sample
= iter
->sample
;
782 struct hist_entry
*he
= NULL
;
788 if (iter
->hide_unresolved
&& !(bi
[i
].from
.sym
&& bi
[i
].to
.sym
))
792 * The report shows the percentage of total branches captured
793 * and not events sampled. Thus we use a pseudo period of 1.
796 sample
->weight
= bi
->flags
.cycles
? bi
->flags
.cycles
: 1;
798 he
= hists__add_entry(hists
, al
, iter
->parent
, &bi
[i
], NULL
,
803 hists__inc_nr_samples(hists
, he
->filtered
);
812 iter_finish_branch_entry(struct hist_entry_iter
*iter
,
813 struct addr_location
*al __maybe_unused
)
818 return iter
->curr
>= iter
->total
? 0 : -1;
822 iter_prepare_normal_entry(struct hist_entry_iter
*iter __maybe_unused
,
823 struct addr_location
*al __maybe_unused
)
829 iter_add_single_normal_entry(struct hist_entry_iter
*iter
, struct addr_location
*al
)
831 struct perf_evsel
*evsel
= iter
->evsel
;
832 struct perf_sample
*sample
= iter
->sample
;
833 struct hist_entry
*he
;
835 he
= hists__add_entry(evsel__hists(evsel
), al
, iter
->parent
, NULL
, NULL
,
845 iter_finish_normal_entry(struct hist_entry_iter
*iter
,
846 struct addr_location
*al __maybe_unused
)
848 struct hist_entry
*he
= iter
->he
;
849 struct perf_evsel
*evsel
= iter
->evsel
;
850 struct perf_sample
*sample
= iter
->sample
;
857 hists__inc_nr_samples(evsel__hists(evsel
), he
->filtered
);
859 return hist_entry__append_callchain(he
, sample
);
863 iter_prepare_cumulative_entry(struct hist_entry_iter
*iter
,
864 struct addr_location
*al __maybe_unused
)
866 struct hist_entry
**he_cache
;
868 callchain_cursor_commit(&callchain_cursor
);
871 * This is for detecting cycles or recursions so that they're
872 * cumulated only one time to prevent entries more than 100%
875 he_cache
= malloc(sizeof(*he_cache
) * (iter
->max_stack
+ 1));
876 if (he_cache
== NULL
)
879 iter
->priv
= he_cache
;
886 iter_add_single_cumulative_entry(struct hist_entry_iter
*iter
,
887 struct addr_location
*al
)
889 struct perf_evsel
*evsel
= iter
->evsel
;
890 struct hists
*hists
= evsel__hists(evsel
);
891 struct perf_sample
*sample
= iter
->sample
;
892 struct hist_entry
**he_cache
= iter
->priv
;
893 struct hist_entry
*he
;
896 he
= hists__add_entry(hists
, al
, iter
->parent
, NULL
, NULL
,
902 he_cache
[iter
->curr
++] = he
;
904 hist_entry__append_callchain(he
, sample
);
907 * We need to re-initialize the cursor since callchain_append()
908 * advanced the cursor to the end.
910 callchain_cursor_commit(&callchain_cursor
);
912 hists__inc_nr_samples(hists
, he
->filtered
);
918 iter_next_cumulative_entry(struct hist_entry_iter
*iter
,
919 struct addr_location
*al
)
921 struct callchain_cursor_node
*node
;
923 node
= callchain_cursor_current(&callchain_cursor
);
927 return fill_callchain_info(al
, node
, iter
->hide_unresolved
);
931 iter_add_next_cumulative_entry(struct hist_entry_iter
*iter
,
932 struct addr_location
*al
)
934 struct perf_evsel
*evsel
= iter
->evsel
;
935 struct perf_sample
*sample
= iter
->sample
;
936 struct hist_entry
**he_cache
= iter
->priv
;
937 struct hist_entry
*he
;
938 struct hist_entry he_tmp
= {
939 .hists
= evsel__hists(evsel
),
941 .thread
= al
->thread
,
942 .comm
= thread__comm(al
->thread
),
948 .parent
= iter
->parent
,
949 .raw_data
= sample
->raw_data
,
950 .raw_size
= sample
->raw_size
,
953 struct callchain_cursor cursor
;
955 callchain_cursor_snapshot(&cursor
, &callchain_cursor
);
957 callchain_cursor_advance(&callchain_cursor
);
960 * Check if there's duplicate entries in the callchain.
961 * It's possible that it has cycles or recursive calls.
963 for (i
= 0; i
< iter
->curr
; i
++) {
964 if (hist_entry__cmp(he_cache
[i
], &he_tmp
) == 0) {
965 /* to avoid calling callback function */
971 he
= hists__add_entry(evsel__hists(evsel
), al
, iter
->parent
, NULL
, NULL
,
977 he_cache
[iter
->curr
++] = he
;
979 if (symbol_conf
.use_callchain
)
980 callchain_append(he
->callchain
, &cursor
, sample
->period
);
985 iter_finish_cumulative_entry(struct hist_entry_iter
*iter
,
986 struct addr_location
*al __maybe_unused
)
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
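/*
 * Illustrative note (added comment): a caller selects one of the ops
 * tables above when filling in a hist_entry_iter, e.g. (hypothetical
 * setup, max_stack chosen by the tool):
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= &hist_iter_branch,
 *	};
 *
 *	hist_entry_iter__add(&iter, &al, max_stack, NULL);
 */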
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	iter->max_stack = max_stack_depth;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}
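/*
 * Illustrative note (added comment): hist_entry_iter__add() drives the
 * selected ops table in a fixed order:
 *
 *	prepare_entry -> add_single_entry ->
 *		while (next_entry) add_next_entry -> finish_entry
 *
 * finish_entry() always runs, even on error, so per-iteration resources
 * (e.g. the branch_info or he_cache set up in prepare_entry) get released.
 */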
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}
void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		free_srcline(he->branch_info->srcline_from);
		free_srcline(he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he->trace_output);
	ops->free(he);
}
/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * that would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);
		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}
/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}
1185 static void hist_entry__check_and_remove_filter(struct hist_entry
*he
,
1186 enum hist_filter type
,
1189 struct perf_hpp_fmt
*fmt
;
1190 bool type_match
= false;
1191 struct hist_entry
*parent
= he
->parent_he
;
1194 case HIST_FILTER__THREAD
:
1195 if (symbol_conf
.comm_list
== NULL
&&
1196 symbol_conf
.pid_list
== NULL
&&
1197 symbol_conf
.tid_list
== NULL
)
1200 case HIST_FILTER__DSO
:
1201 if (symbol_conf
.dso_list
== NULL
)
1204 case HIST_FILTER__SYMBOL
:
1205 if (symbol_conf
.sym_list
== NULL
)
1208 case HIST_FILTER__PARENT
:
1209 case HIST_FILTER__GUEST
:
1210 case HIST_FILTER__HOST
:
1211 case HIST_FILTER__SOCKET
:
1212 case HIST_FILTER__C2C
:
1217 /* if it's filtered by own fmt, it has to have filter bits */
1218 perf_hpp_list__for_each_format(he
->hpp_list
, fmt
) {
1227 * If the filter is for current level entry, propagate
1228 * filter marker to parents. The marker bit was
1229 * already set by default so it only needs to clear
1230 * non-filtered entries.
1232 if (!(he
->filtered
& (1 << type
))) {
1234 parent
->filtered
&= ~(1 << type
);
1235 parent
= parent
->parent_he
;
1240 * If current entry doesn't have matching formats, set
1241 * filter marker for upper level entries. it will be
1242 * cleared if its lower level entries is not filtered.
1244 * For lower-level entries, it inherits parent's
1245 * filter bit so that lower level entries of a
1246 * non-filtered entry won't set the filter marker.
1249 he
->filtered
|= (1 << type
);
1251 he
->filtered
|= (parent
->filtered
& (1 << type
));
static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}
1269 static struct hist_entry
*hierarchy_insert_entry(struct hists
*hists
,
1270 struct rb_root
*root
,
1271 struct hist_entry
*he
,
1272 struct hist_entry
*parent_he
,
1273 struct perf_hpp_list
*hpp_list
)
1275 struct rb_node
**p
= &root
->rb_node
;
1276 struct rb_node
*parent
= NULL
;
1277 struct hist_entry
*iter
, *new;
1278 struct perf_hpp_fmt
*fmt
;
1281 while (*p
!= NULL
) {
1283 iter
= rb_entry(parent
, struct hist_entry
, rb_node_in
);
1286 perf_hpp_list__for_each_sort_list(hpp_list
, fmt
) {
1287 cmp
= fmt
->collapse(fmt
, iter
, he
);
1293 he_stat__add_stat(&iter
->stat
, &he
->stat
);
1298 p
= &parent
->rb_left
;
1300 p
= &parent
->rb_right
;
1303 new = hist_entry__new(he
, true);
1307 hists
->nr_entries
++;
1309 /* save related format list for output */
1310 new->hpp_list
= hpp_list
;
1311 new->parent_he
= parent_he
;
1313 hist_entry__apply_hierarchy_filters(new);
1315 /* some fields are now passed to 'new' */
1316 perf_hpp_list__for_each_sort_list(hpp_list
, fmt
) {
1317 if (perf_hpp__is_trace_entry(fmt
) || perf_hpp__is_dynamic_entry(fmt
))
1318 he
->trace_output
= NULL
;
1320 new->trace_output
= NULL
;
1322 if (perf_hpp__is_srcline_entry(fmt
))
1325 new->srcline
= NULL
;
1327 if (perf_hpp__is_srcfile_entry(fmt
))
1330 new->srcfile
= NULL
;
1333 rb_link_node(&new->rb_node_in
, parent
, p
);
1334 rb_insert_color(&new->rb_node_in
, root
);
1338 static int hists__hierarchy_insert_entry(struct hists
*hists
,
1339 struct rb_root
*root
,
1340 struct hist_entry
*he
)
1342 struct perf_hpp_list_node
*node
;
1343 struct hist_entry
*new_he
= NULL
;
1344 struct hist_entry
*parent
= NULL
;
1348 list_for_each_entry(node
, &hists
->hpp_formats
, list
) {
1349 /* skip period (overhead) and elided columns */
1350 if (node
->level
== 0 || node
->skip
)
1353 /* insert copy of 'he' for each fmt into the hierarchy */
1354 new_he
= hierarchy_insert_entry(hists
, root
, he
, parent
, &node
->hpp
);
1355 if (new_he
== NULL
) {
1360 root
= &new_he
->hroot_in
;
1361 new_he
->depth
= depth
++;
1366 new_he
->leaf
= true;
1368 if (symbol_conf
.use_callchain
) {
1369 callchain_cursor_reset(&callchain_cursor
);
1370 if (callchain_merge(&callchain_cursor
,
1377 /* 'he' is no longer used */
1378 hist_entry__delete(he
);
1380 /* return 0 (or -1) since it already applied filters */
static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return 1;
}
struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}
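/*
 * Illustrative note (added comment): reporting is a two-pass resort.
 * hists__collapse_resort() above merges entries that compare equal on the
 * sort keys into hists->entries_collapsed; the output_resort() paths
 * further below then order the collapsed entries for display.  A
 * hypothetical report path therefore looks like:
 *
 *	hists__collapse_resort(hists, NULL);
 *	perf_evsel__output_resort(evsel, NULL);
 */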
static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}
static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
1543 static void hierarchy_recalc_total_periods(struct hists
*hists
)
1545 struct rb_node
*node
;
1546 struct hist_entry
*he
;
1548 node
= rb_first(&hists
->entries
);
1550 hists
->stats
.total_period
= 0;
1551 hists
->stats
.total_non_filtered_period
= 0;
1554 * recalculate total period using top-level entries only
1555 * since lower level entries only see non-filtered entries
1556 * but upper level entries have sum of both entries.
1559 he
= rb_entry(node
, struct hist_entry
, rb_node
);
1560 node
= rb_next(node
);
1562 hists
->stats
.total_period
+= he
->stat
.period
;
1564 hists
->stats
.total_non_filtered_period
+= he
->stat
.period
;
1568 static void hierarchy_insert_output_entry(struct rb_root
*root
,
1569 struct hist_entry
*he
)
1571 struct rb_node
**p
= &root
->rb_node
;
1572 struct rb_node
*parent
= NULL
;
1573 struct hist_entry
*iter
;
1574 struct perf_hpp_fmt
*fmt
;
1576 while (*p
!= NULL
) {
1578 iter
= rb_entry(parent
, struct hist_entry
, rb_node
);
1580 if (hist_entry__sort(he
, iter
) > 0)
1581 p
= &parent
->rb_left
;
1583 p
= &parent
->rb_right
;
1586 rb_link_node(&he
->rb_node
, parent
, p
);
1587 rb_insert_color(&he
->rb_node
, root
);
1589 /* update column width of dynamic entry */
1590 perf_hpp_list__for_each_sort_list(he
->hpp_list
, fmt
) {
1591 if (perf_hpp__is_dynamic_entry(fmt
))
1592 fmt
->sort(fmt
, he
, NULL
);
1596 static void hists__hierarchy_output_resort(struct hists
*hists
,
1597 struct ui_progress
*prog
,
1598 struct rb_root
*root_in
,
1599 struct rb_root
*root_out
,
1600 u64 min_callchain_hits
,
1603 struct rb_node
*node
;
1604 struct hist_entry
*he
;
1606 *root_out
= RB_ROOT
;
1607 node
= rb_first(root_in
);
1610 he
= rb_entry(node
, struct hist_entry
, rb_node_in
);
1611 node
= rb_next(node
);
1613 hierarchy_insert_output_entry(root_out
, he
);
1616 ui_progress__update(prog
, 1);
1618 hists
->nr_entries
++;
1619 if (!he
->filtered
) {
1620 hists
->nr_non_filtered_entries
++;
1621 hists__calc_col_len(hists
, he
);
1625 hists__hierarchy_output_resort(hists
, prog
,
1636 if (callchain_param
.mode
== CHAIN_GRAPH_REL
) {
1637 u64 total
= he
->stat
.period
;
1639 if (symbol_conf
.cumulate_callchain
)
1640 total
= he
->stat_acc
->period
;
1642 min_callchain_hits
= total
* (callchain_param
.min_percent
/ 100);
1645 callchain_param
.sort(&he
->sorted_chain
, he
->callchain
,
1646 min_callchain_hits
, &callchain_param
);
1650 static void __hists__insert_output_entry(struct rb_root
*entries
,
1651 struct hist_entry
*he
,
1652 u64 min_callchain_hits
,
1655 struct rb_node
**p
= &entries
->rb_node
;
1656 struct rb_node
*parent
= NULL
;
1657 struct hist_entry
*iter
;
1658 struct perf_hpp_fmt
*fmt
;
1660 if (use_callchain
) {
1661 if (callchain_param
.mode
== CHAIN_GRAPH_REL
) {
1662 u64 total
= he
->stat
.period
;
1664 if (symbol_conf
.cumulate_callchain
)
1665 total
= he
->stat_acc
->period
;
1667 min_callchain_hits
= total
* (callchain_param
.min_percent
/ 100);
1669 callchain_param
.sort(&he
->sorted_chain
, he
->callchain
,
1670 min_callchain_hits
, &callchain_param
);
1673 while (*p
!= NULL
) {
1675 iter
= rb_entry(parent
, struct hist_entry
, rb_node
);
1677 if (hist_entry__sort(he
, iter
) > 0)
1680 p
= &(*p
)->rb_right
;
1683 rb_link_node(&he
->rb_node
, parent
, p
);
1684 rb_insert_color(&he
->rb_node
, entries
);
1686 perf_hpp_list__for_each_sort_list(&perf_hpp_list
, fmt
) {
1687 if (perf_hpp__is_dynamic_entry(fmt
) &&
1688 perf_hpp__defined_dynamic_entry(fmt
, he
->hists
))
1689 fmt
->sort(fmt
, he
, NULL
); /* update column width */
1693 static void output_resort(struct hists
*hists
, struct ui_progress
*prog
,
1694 bool use_callchain
, hists__resort_cb_t cb
)
1696 struct rb_root
*root
;
1697 struct rb_node
*next
;
1698 struct hist_entry
*n
;
1699 u64 callchain_total
;
1700 u64 min_callchain_hits
;
1702 callchain_total
= hists
->callchain_period
;
1703 if (symbol_conf
.filter_relative
)
1704 callchain_total
= hists
->callchain_non_filtered_period
;
1706 min_callchain_hits
= callchain_total
* (callchain_param
.min_percent
/ 100);
1708 hists__reset_stats(hists
);
1709 hists__reset_col_len(hists
);
1711 if (symbol_conf
.report_hierarchy
) {
1712 hists__hierarchy_output_resort(hists
, prog
,
1713 &hists
->entries_collapsed
,
1717 hierarchy_recalc_total_periods(hists
);
1721 if (hists__has(hists
, need_collapse
))
1722 root
= &hists
->entries_collapsed
;
1724 root
= hists
->entries_in
;
1726 next
= rb_first(root
);
1727 hists
->entries
= RB_ROOT
;
1730 n
= rb_entry(next
, struct hist_entry
, rb_node_in
);
1731 next
= rb_next(&n
->rb_node_in
);
1736 __hists__insert_output_entry(&hists
->entries
, n
, min_callchain_hits
, use_callchain
);
1737 hists__inc_stats(hists
, n
);
1740 hists__calc_col_len(hists
, n
);
1743 ui_progress__update(prog
, 1);
void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
	else
		use_callchain = symbol_conf.use_callchain;

	output_resort(evsel__hists(evsel), prog, use_callchain, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb);
}
1770 static bool can_goto_child(struct hist_entry
*he
, enum hierarchy_move_dir hmd
)
1772 if (he
->leaf
|| hmd
== HMD_FORCE_SIBLING
)
1775 if (he
->unfolded
|| hmd
== HMD_FORCE_CHILD
)
1781 struct rb_node
*rb_hierarchy_last(struct rb_node
*node
)
1783 struct hist_entry
*he
= rb_entry(node
, struct hist_entry
, rb_node
);
1785 while (can_goto_child(he
, HMD_NORMAL
)) {
1786 node
= rb_last(&he
->hroot_out
);
1787 he
= rb_entry(node
, struct hist_entry
, rb_node
);
1792 struct rb_node
*__rb_hierarchy_next(struct rb_node
*node
, enum hierarchy_move_dir hmd
)
1794 struct hist_entry
*he
= rb_entry(node
, struct hist_entry
, rb_node
);
1796 if (can_goto_child(he
, hmd
))
1797 node
= rb_first(&he
->hroot_out
);
1799 node
= rb_next(node
);
1801 while (node
== NULL
) {
1806 node
= rb_next(&he
->rb_node
);
1811 struct rb_node
*rb_hierarchy_prev(struct rb_node
*node
)
1813 struct hist_entry
*he
= rb_entry(node
, struct hist_entry
, rb_node
);
1815 node
= rb_prev(node
);
1817 return rb_hierarchy_last(node
);
1823 return &he
->rb_node
;
1826 bool hist_entry__has_hierarchy_children(struct hist_entry
*he
, float limit
)
1828 struct rb_node
*node
;
1829 struct hist_entry
*child
;
1835 node
= rb_first(&he
->hroot_out
);
1836 child
= rb_entry(node
, struct hist_entry
, rb_node
);
1838 while (node
&& child
->filtered
) {
1839 node
= rb_next(node
);
1840 child
= rb_entry(node
, struct hist_entry
, rb_node
);
1844 percent
= hist_entry__get_percent_limit(child
);
1848 return node
&& percent
>= limit
;
1851 static void hists__remove_entry_filter(struct hists
*hists
, struct hist_entry
*h
,
1852 enum hist_filter filter
)
1854 h
->filtered
&= ~(1 << filter
);
1856 if (symbol_conf
.report_hierarchy
) {
1857 struct hist_entry
*parent
= h
->parent_he
;
1860 he_stat__add_stat(&parent
->stat
, &h
->stat
);
1862 parent
->filtered
&= ~(1 << filter
);
1864 if (parent
->filtered
)
1867 /* force fold unfiltered entry for simplicity */
1868 parent
->unfolded
= false;
1869 parent
->has_no_entry
= false;
1870 parent
->row_offset
= 0;
1871 parent
->nr_rows
= 0;
1873 parent
= parent
->parent_he
;
1880 /* force fold unfiltered entry for simplicity */
1881 h
->unfolded
= false;
1882 h
->has_no_entry
= false;
1886 hists
->stats
.nr_non_filtered_samples
+= h
->stat
.nr_events
;
1888 hists__inc_filter_stats(hists
, h
);
1889 hists__calc_col_len(hists
, h
);
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}
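/*
 * Illustrative note (added comment): each predicate above only sets its
 * own bit in he->filtered; an entry is shown when the mask is zero.  An
 * entry failing both the dso and thread filters therefore carries
 * (1 << HIST_FILTER__DSO) | (1 << HIST_FILTER__THREAD) until the
 * corresponding filters are dropped again.
 */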
1942 typedef bool (*filter_fn_t
)(struct hists
*hists
, struct hist_entry
*he
);
1944 static void hists__filter_by_type(struct hists
*hists
, int type
, filter_fn_t filter
)
1948 hists
->stats
.nr_non_filtered_samples
= 0;
1950 hists__reset_filter_stats(hists
);
1951 hists__reset_col_len(hists
);
1953 for (nd
= rb_first(&hists
->entries
); nd
; nd
= rb_next(nd
)) {
1954 struct hist_entry
*h
= rb_entry(nd
, struct hist_entry
, rb_node
);
1956 if (filter(hists
, h
))
1959 hists__remove_entry_filter(hists
, h
, type
);
1963 static void resort_filtered_entry(struct rb_root
*root
, struct hist_entry
*he
)
1965 struct rb_node
**p
= &root
->rb_node
;
1966 struct rb_node
*parent
= NULL
;
1967 struct hist_entry
*iter
;
1968 struct rb_root new_root
= RB_ROOT
;
1971 while (*p
!= NULL
) {
1973 iter
= rb_entry(parent
, struct hist_entry
, rb_node
);
1975 if (hist_entry__sort(he
, iter
) > 0)
1978 p
= &(*p
)->rb_right
;
1981 rb_link_node(&he
->rb_node
, parent
, p
);
1982 rb_insert_color(&he
->rb_node
, root
);
1984 if (he
->leaf
|| he
->filtered
)
1987 nd
= rb_first(&he
->hroot_out
);
1989 struct hist_entry
*h
= rb_entry(nd
, struct hist_entry
, rb_node
);
1992 rb_erase(&h
->rb_node
, &he
->hroot_out
);
1994 resort_filtered_entry(&new_root
, h
);
1997 he
->hroot_out
= new_root
;
2000 static void hists__filter_hierarchy(struct hists
*hists
, int type
, const void *arg
)
2003 struct rb_root new_root
= RB_ROOT
;
2005 hists
->stats
.nr_non_filtered_samples
= 0;
2007 hists__reset_filter_stats(hists
);
2008 hists__reset_col_len(hists
);
2010 nd
= rb_first(&hists
->entries
);
2012 struct hist_entry
*h
= rb_entry(nd
, struct hist_entry
, rb_node
);
2015 ret
= hist_entry__filter(h
, type
, arg
);
2018 * case 1. non-matching type
2019 * zero out the period, set filter marker and move to child
2022 memset(&h
->stat
, 0, sizeof(h
->stat
));
2023 h
->filtered
|= (1 << type
);
2025 nd
= __rb_hierarchy_next(&h
->rb_node
, HMD_FORCE_CHILD
);
2028 * case 2. matched type (filter out)
2029 * set filter marker and move to next
2031 else if (ret
== 1) {
2032 h
->filtered
|= (1 << type
);
2034 nd
= __rb_hierarchy_next(&h
->rb_node
, HMD_FORCE_SIBLING
);
2037 * case 3. ok (not filtered)
2038 * add period to hists and parents, erase the filter marker
2039 * and move to next sibling
2042 hists__remove_entry_filter(hists
, h
, type
);
2044 nd
= __rb_hierarchy_next(&h
->rb_node
, HMD_FORCE_SIBLING
);
2048 hierarchy_recalc_total_periods(hists
);
2051 * resort output after applying a new filter since filter in a lower
2052 * hierarchy can change periods in a upper hierarchy.
2054 nd
= rb_first(&hists
->entries
);
2056 struct hist_entry
*h
= rb_entry(nd
, struct hist_entry
, rb_node
);
2059 rb_erase(&h
->rb_node
, &hists
->entries
);
2061 resort_filtered_entry(&new_root
, h
);
2064 hists
->entries
= new_root
;
2067 void hists__filter_by_thread(struct hists
*hists
)
2069 if (symbol_conf
.report_hierarchy
)
2070 hists__filter_hierarchy(hists
, HIST_FILTER__THREAD
,
2071 hists
->thread_filter
);
2073 hists__filter_by_type(hists
, HIST_FILTER__THREAD
,
2074 hists__filter_entry_by_thread
);
2077 void hists__filter_by_dso(struct hists
*hists
)
2079 if (symbol_conf
.report_hierarchy
)
2080 hists__filter_hierarchy(hists
, HIST_FILTER__DSO
,
2083 hists__filter_by_type(hists
, HIST_FILTER__DSO
,
2084 hists__filter_entry_by_dso
);
2087 void hists__filter_by_symbol(struct hists
*hists
)
2089 if (symbol_conf
.report_hierarchy
)
2090 hists__filter_hierarchy(hists
, HIST_FILTER__SYMBOL
,
2091 hists
->symbol_filter_str
);
2093 hists__filter_by_type(hists
, HIST_FILTER__SYMBOL
,
2094 hists__filter_entry_by_symbol
);
2097 void hists__filter_by_socket(struct hists
*hists
)
2099 if (symbol_conf
.report_hierarchy
)
2100 hists__filter_hierarchy(hists
, HIST_FILTER__SOCKET
,
2101 &hists
->socket_filter
);
2103 hists__filter_by_type(hists
, HIST_FILTER__SOCKET
,
2104 hists__filter_entry_by_socket
);
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
2125 static struct hist_entry
*hists__add_dummy_entry(struct hists
*hists
,
2126 struct hist_entry
*pair
)
2128 struct rb_root
*root
;
2130 struct rb_node
*parent
= NULL
;
2131 struct hist_entry
*he
;
2134 if (hists__has(hists
, need_collapse
))
2135 root
= &hists
->entries_collapsed
;
2137 root
= hists
->entries_in
;
2141 while (*p
!= NULL
) {
2143 he
= rb_entry(parent
, struct hist_entry
, rb_node_in
);
2145 cmp
= hist_entry__collapse(he
, pair
);
2153 p
= &(*p
)->rb_right
;
2156 he
= hist_entry__new(pair
, true);
2158 memset(&he
->stat
, 0, sizeof(he
->stat
));
2160 if (symbol_conf
.cumulate_callchain
)
2161 memset(he
->stat_acc
, 0, sizeof(he
->stat
));
2162 rb_link_node(&he
->rb_node_in
, parent
, p
);
2163 rb_insert_color(&he
->rb_node_in
, root
);
2164 hists__inc_stats(hists
, he
);
2171 static struct hist_entry
*add_dummy_hierarchy_entry(struct hists
*hists
,
2172 struct rb_root
*root
,
2173 struct hist_entry
*pair
)
2176 struct rb_node
*parent
= NULL
;
2177 struct hist_entry
*he
;
2178 struct perf_hpp_fmt
*fmt
;
2181 while (*p
!= NULL
) {
2185 he
= rb_entry(parent
, struct hist_entry
, rb_node_in
);
2187 perf_hpp_list__for_each_sort_list(he
->hpp_list
, fmt
) {
2188 cmp
= fmt
->collapse(fmt
, he
, pair
);
2196 p
= &parent
->rb_left
;
2198 p
= &parent
->rb_right
;
2201 he
= hist_entry__new(pair
, true);
2203 rb_link_node(&he
->rb_node_in
, parent
, p
);
2204 rb_insert_color(&he
->rb_node_in
, root
);
2208 memset(&he
->stat
, 0, sizeof(he
->stat
));
2209 hists__inc_stats(hists
, he
);
2215 static struct hist_entry
*hists__find_entry(struct hists
*hists
,
2216 struct hist_entry
*he
)
2220 if (hists__has(hists
, need_collapse
))
2221 n
= hists
->entries_collapsed
.rb_node
;
2223 n
= hists
->entries_in
->rb_node
;
2226 struct hist_entry
*iter
= rb_entry(n
, struct hist_entry
, rb_node_in
);
2227 int64_t cmp
= hist_entry__collapse(iter
, he
);
2240 static struct hist_entry
*hists__find_hierarchy_entry(struct rb_root
*root
,
2241 struct hist_entry
*he
)
2243 struct rb_node
*n
= root
->rb_node
;
2246 struct hist_entry
*iter
;
2247 struct perf_hpp_fmt
*fmt
;
2250 iter
= rb_entry(n
, struct hist_entry
, rb_node_in
);
2251 perf_hpp_list__for_each_sort_list(he
->hpp_list
, fmt
) {
2252 cmp
= fmt
->collapse(fmt
, iter
, he
);
2268 static void hists__match_hierarchy(struct rb_root
*leader_root
,
2269 struct rb_root
*other_root
)
2272 struct hist_entry
*pos
, *pair
;
2274 for (nd
= rb_first(leader_root
); nd
; nd
= rb_next(nd
)) {
2275 pos
= rb_entry(nd
, struct hist_entry
, rb_node_in
);
2276 pair
= hists__find_hierarchy_entry(other_root
, pos
);
2279 hist_entry__add_pair(pair
, pos
);
2280 hists__match_hierarchy(&pos
->hroot_in
, &pair
->hroot_in
);
2286 * Look for pairs to link to the leader buckets (hist_entries):
2288 void hists__match(struct hists
*leader
, struct hists
*other
)
2290 struct rb_root
*root
;
2292 struct hist_entry
*pos
, *pair
;
2294 if (symbol_conf
.report_hierarchy
) {
2295 /* hierarchy report always collapses entries */
2296 return hists__match_hierarchy(&leader
->entries_collapsed
,
2297 &other
->entries_collapsed
);
2300 if (hists__has(leader
, need_collapse
))
2301 root
= &leader
->entries_collapsed
;
2303 root
= leader
->entries_in
;
2305 for (nd
= rb_first(root
); nd
; nd
= rb_next(nd
)) {
2306 pos
= rb_entry(nd
, struct hist_entry
, rb_node_in
);
2307 pair
= hists__find_entry(other
, pos
);
2310 hist_entry__add_pair(pair
, pos
);
2314 static int hists__link_hierarchy(struct hists
*leader_hists
,
2315 struct hist_entry
*parent
,
2316 struct rb_root
*leader_root
,
2317 struct rb_root
*other_root
)
2320 struct hist_entry
*pos
, *leader
;
2322 for (nd
= rb_first(other_root
); nd
; nd
= rb_next(nd
)) {
2323 pos
= rb_entry(nd
, struct hist_entry
, rb_node_in
);
2325 if (hist_entry__has_pairs(pos
)) {
2328 list_for_each_entry(leader
, &pos
->pairs
.head
, pairs
.node
) {
2329 if (leader
->hists
== leader_hists
) {
2337 leader
= add_dummy_hierarchy_entry(leader_hists
,
2342 /* do not point parent in the pos */
2343 leader
->parent_he
= parent
;
2345 hist_entry__add_pair(pos
, leader
);
2349 if (hists__link_hierarchy(leader_hists
, leader
,
2351 &pos
->hroot_in
) < 0)
2359 * Look for entries in the other hists that are not present in the leader, if
2360 * we find them, just add a dummy entry on the leader hists, with period=0,
2361 * nr_events=0, to serve as the list header.
2363 int hists__link(struct hists
*leader
, struct hists
*other
)
2365 struct rb_root
*root
;
2367 struct hist_entry
*pos
, *pair
;
2369 if (symbol_conf
.report_hierarchy
) {
2370 /* hierarchy report always collapses entries */
2371 return hists__link_hierarchy(leader
, NULL
,
2372 &leader
->entries_collapsed
,
2373 &other
->entries_collapsed
);
2376 if (hists__has(other
, need_collapse
))
2377 root
= &other
->entries_collapsed
;
2379 root
= other
->entries_in
;
2381 for (nd
= rb_first(root
); nd
; nd
= rb_next(nd
)) {
2382 pos
= rb_entry(nd
, struct hist_entry
, rb_node_in
);
2384 if (!hist_entry__has_pairs(pos
)) {
2385 pair
= hists__add_dummy_entry(leader
, pos
);
2388 hist_entry__add_pair(pos
, pair
);
2395 void hist__account_cycles(struct branch_stack
*bs
, struct addr_location
*al
,
2396 struct perf_sample
*sample
, bool nonany_branch_mode
)
2398 struct branch_info
*bi
;
2400 /* If we have branch cycles always annotate them. */
2401 if (bs
&& bs
->nr
&& bs
->entries
[0].flags
.cycles
) {
2404 bi
= sample__resolve_bstack(sample
, al
);
2406 struct addr_map_symbol
*prev
= NULL
;
2409 * Ignore errors, still want to process the
2412 * For non standard branch modes always
2413 * force no IPC (prev == NULL)
2415 * Note that perf stores branches reversed from
2418 for (i
= bs
->nr
- 1; i
>= 0; i
--) {
2419 addr_map_symbol__account_cycles(&bi
[i
].from
,
2420 nonany_branch_mode
? NULL
: prev
,
2421 bi
[i
].flags
.cycles
);
2429 size_t perf_evlist__fprintf_nr_events(struct perf_evlist
*evlist
, FILE *fp
)
2431 struct perf_evsel
*pos
;
2434 evlist__for_each_entry(evlist
, pos
) {
2435 ret
+= fprintf(fp
, "%s stats:\n", perf_evsel__name(pos
));
2436 ret
+= events_stats__fprintf(&evsel__hists(pos
)->stats
, fp
);
u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
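/*
 * Illustrative note (added comment): the per-entry overhead shown by the
 * report tools is computed against this total, roughly
 *
 *	percent = 100.0 * he->stat.period / hists__total_period(hists);
 *
 * With "relative" filtering the denominator only counts non-filtered
 * entries, so the visible percentages add up to 100% again.
 */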
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
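/*
 * Illustrative note (added comment): the same knob can be set from the
 * command line (--percentage {relative|absolute} in the report tools) or
 * from a perfconfig file, e.g.
 *
 *	[hist]
 *		percentage = relative
 *
 * which reaches parse_filter_percentage() through perf_hist_config().
 */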
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}
2486 static void hists__delete_remaining_entries(struct rb_root
*root
)
2488 struct rb_node
*node
;
2489 struct hist_entry
*he
;
2491 while (!RB_EMPTY_ROOT(root
)) {
2492 node
= rb_first(root
);
2493 rb_erase(node
, root
);
2495 he
= rb_entry(node
, struct hist_entry
, rb_node_in
);
2496 hist_entry__delete(he
);
2500 static void hists__delete_all_entries(struct hists
*hists
)
2502 hists__delete_entries(hists
);
2503 hists__delete_remaining_entries(&hists
->entries_in_array
[0]);
2504 hists__delete_remaining_entries(&hists
->entries_in_array
[1]);
2505 hists__delete_remaining_entries(&hists
->entries_collapsed
);
2508 static void hists_evsel__exit(struct perf_evsel
*evsel
)
2510 struct hists
*hists
= evsel__hists(evsel
);
2511 struct perf_hpp_fmt
*fmt
, *pos
;
2512 struct perf_hpp_list_node
*node
, *tmp
;
2514 hists__delete_all_entries(hists
);
2516 list_for_each_entry_safe(node
, tmp
, &hists
->hpp_formats
, list
) {
2517 perf_hpp_list__for_each_format_safe(&node
->hpp
, fmt
, pos
) {
2518 list_del(&fmt
->list
);
2521 list_del(&node
->list
);
static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}