#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE
};
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}
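/*
 * A quick sketch of how the col_len helpers are meant to be used (the
 * callers below follow this pattern): start from zero and let each entry
 * widen a column only when its text would not fit, e.g.:
 *
 *	hists__reset_col_len(hists);
 *	hists__new_col_len(hists, HISTC_COMM, strlen("firefox"));   // grows
 *	hists__new_col_len(hists, HISTC_COMM, strlen("cc1"));       // no-op
 *
 * so col_len[HISTC_COMM] ends up as the widest comm seen so far.
 */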
void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
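/*
 * BITS_PER_LONG / 4 is the number of hex digits in an address-sized word
 * (16 on 64-bit, 8 on 32-bit), i.e. the width needed to print an
 * unresolved raw address before the '[x] ' and '0x' decorations are
 * accounted for by the +4 and +2 adjustments below.
 */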
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
	else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		/*
		 * +4 accounts for '[x] ' priv level info
		 * +2 accounts for the 0x prefix on raw addresses
		 */
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		/*
		 * +4 accounts for '[x] ' priv level info
		 * +2 accounts for the 0x prefix on raw addresses
		 */
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}
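/*
 * The cpumode values above come from perf_event_header.misc
 * (the PERF_RECORD_MISC_* constants in include/uapi/linux/perf_event.h);
 * callers pass it through the resolved addr_location, see the
 * al->cpumode use in add_hist_entry() below.
 */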
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}
static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
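/*
 * The 7/8 factor gives an exponential fade for entries that stop getting
 * samples: after n decay passes an entry keeps (7/8)^n of its period, so
 * it drops below 10% of its original weight after roughly 18 passes.
 * Live tools such as perf top rely on this to age out stale entries
 * between screen refreshes.
 */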
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}
static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	__hists__decay_entries(hists, zap_user, zap_kernel, false);
}
void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	__hists__decay_entries(hists, zap_user, zap_kernel, true);
}
/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}
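/*
 * Note the allocation trick above: he->callchain is a zero-length array
 * at the tail of struct hist_entry, so the callchain_root lives in the
 * same zalloc()ed block right past the entry. Entries only pay for
 * callchain storage when symbol_conf.use_callchain is set.
 */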
void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period, u64 weight)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/*
			 * If the map of an existing hist_entry has become
			 * out-of-date due to an exec() or similar, update it.
			 * Otherwise we will mis-adjust symbol addresses when
			 * computing the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}
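/*
 * Note on locking: hists->lock is held across the search-and-insert above
 * because hists->entries_in can be flipped to the other in-tree by
 * hists__get_rotate_entries_in() while a collapse pass runs in another
 * thread (see the threaded resort variants below).
 */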
struct hist_entry *__hists__add_mem_entry(struct hists *self,
					  struct addr_location *al,
					  struct symbol *sym_parent,
					  struct mem_info *mi,
					  u64 period, u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.stat = {
			.period	= period,
			.weight = weight,
			.nr_events = 1,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists = self,
		.mem_info = mi,
		.branch_info = NULL,
	};
	return add_hist_entry(self, &entry, al, period, weight);
}
struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period, u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= bi->to.map,
			.sym	= bi->to.sym,
		},
		.cpu	= al->cpu,
		.ip	= bi->to.addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
		.hists	= self,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}
struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period,
				      u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists	= self,
		.branch_info = NULL,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}
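/*
 * Usage sketch (hedged: the actual call sites live in the perf builtins,
 * not in this file): a sample handler resolves an addr_location 'al' and
 * then does something like
 *
 *	he = __hists__add_entry(&evsel->hists, &al, parent,
 *				sample->period, sample->weight);
 *	if (he == NULL)
 *		return -ENOMEM;
 *
 * Samples with equal sort keys are merged into a single entry by the
 * rbtree walk in add_hist_entry().
 */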
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he->mem_info);
	free(he);
}
/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
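/*
 * The return value above drives __hists__collapse_resort(): true means
 * the entry was inserted as a new collapsed bucket (and still needs the
 * filters applied); false means it was merged into an existing bucket and
 * has already been freed, so callers must not touch 'he' in that case.
 */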
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
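/*
 * entries_in_array[] holds two trees used double-buffer style: new samples
 * keep landing in the tree 'entries_in' points at, while the tree returned
 * here is drained by the collapse pass. The pointer flips between
 * &entries_in_array[0] and &entries_in_array[1] under hists->lock.
 */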
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}
void hists__collapse_resort(struct hists *hists)
{
	__hists__collapse_resort(hists, false);
}
void hists__collapse_resort_threaded(struct hists *hists)
{
	__hists__collapse_resort(hists, true);
}
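/*
 * A minimal sketch of the resort pipeline, assuming a perf-report style
 * consumer that has finished feeding samples into 'hists':
 *
 *	hists__collapse_resort(hists);	// merge entries_in by sort keys
 *	hists__output_resort(hists);	// rebuild hists->entries by period
 *	hists__output_recalc_col_len(hists, rows);
 *
 * The *_threaded variants are for tools like perf top that collapse from
 * a separate thread while samples are still arriving.
 */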
/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
	if (period_a > period_b)
		return 1;
	if (period_a < period_b)
		return -1;
	return 0;
}
static int hist_entry__sort_on_period(struct hist_entry *a,
				      struct hist_entry *b)
{
	int ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *periods_a, *periods_b;

	ret = period_cmp(a->stat.period, b->stat.period);
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	nr_members = evsel->nr_members;
	if (nr_members <= 1)
		return ret;

	periods_a = zalloc(sizeof(*periods_a) * nr_members);
	periods_b = zalloc(sizeof(*periods_b) * nr_members);

	if (!periods_a || !periods_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	for (i = 1; i < nr_members; i++) {
		ret = period_cmp(periods_a[i], periods_b[i]);
		if (ret)
			break;
	}

out:
	free(periods_a);
	free(periods_b);

	return ret;
}
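/*
 * For event groups the loop above starts at i = 1: index 0 is the group
 * leader, whose period was already compared via a->stat.period before the
 * per-member arrays were filled in.
 */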
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}
void hists__output_resort(struct hists *hists)
{
	__hists__output_resort(hists, false);
}
void hists__output_resort_threaded(struct hists *hists)
{
	__hists__output_resort(hists, true);
}
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}
void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}
void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}
void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}
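/*
 * Filter flow sketch (the filter fields are set by the UI/browser code,
 * not here): assign e.g. hists->symbol_filter_str and rerun the matching
 * pass:
 *
 *	hists->symbol_filter_str = "malloc";
 *	hists__filter_by_symbol(hists);
 *
 * Entries that fail keep a HIST_FILTER__* bit set in he->filtered;
 * entries that pass get the bit cleared and are re-added to the totals
 * by hists__remove_entry_filter().
 */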
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}
int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
	}
out:
	return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;

			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
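/*
 * hists__match()/hists__link() sketch, assuming a two-hists comparison in
 * the style of perf diff:
 *
 *	hists__match(leader_hists, other_hists);   // pair equal entries
 *	hists__link(leader_hists, other_hists);    // add 0-period dummies
 *
 * Afterwards every entry in 'other' has a counterpart in 'leader', so the
 * two histograms can be walked side by side.
 */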