/*
 * tools/perf/util/hist.c - histogram (hist_entry) management for the perf
 * tools: column sizing, entry insertion and merging, collapsing, output
 * resorting, filtering, and matching/linking entries across events.
 */
#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

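/*
 * Filter bits OR'ed into hist_entry->filtered; an entry is displayed
 * only when no filter bit is set.
 */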
enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};

struct callchain_param callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order	= ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

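/* Widen column @col to @len; returns true if the column actually grew. */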
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

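/*
 * Reserve enough width in the @dso column to print an unresolved raw
 * address (BITS_PER_LONG / 4 hex digits), unless the user fixed the
 * column widths, set a field separator, or is filtering by DSO anyway.
 */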
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
	else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		/*
		 * +4 accounts for the '[x] ' priv level info
		 * +2 accounts for the 0x prefix on raw addresses
		 */
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		/*
		 * +4 accounts for the '[x] ' priv level info
		 * +2 accounts for the 0x prefix on raw addresses
		 */
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

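/*
 * Credit @period to the bucket matching the sample's cpumode:
 * kernel, user, guest kernel or guest user.
 */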
static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

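/*
 * Age an entry geometrically (multiply by 7/8) so that, in live modes
 * such as 'perf top', buckets that stop getting samples fade away.
 */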
static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}

static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here
		 * in case it gets new samples; we'll eventually free it
		 * when the user stops browsing and it gets fully decayed
		 * again.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	__hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	__hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */

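/*
 * Allocate a new hist_entry initialized from @template. The callchain
 * root lives in a flexible area at the end of the struct, so it is only
 * allocated when callchains are actually in use.
 */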
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ?
				sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

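/*
 * Insert @entry into the in-flight rbtree (hists->entries_in), keyed by
 * hist_entry__cmp(). If an equal entry already exists, fold the new
 * period/weight into it instead of allocating a duplicate.
 */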
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period,
					 u64 weight)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/*
			 * If the map of an existing hist_entry has become
			 * out-of-date due to an exec() or similar, update it.
			 * Otherwise we will mis-adjust symbol addresses when
			 * computing the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}

struct hist_entry *__hists__add_mem_entry(struct hists *self,
					  struct addr_location *al,
					  struct symbol *sym_parent,
					  struct mem_info *mi,
					  u64 period,
					  u64 weight)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.stat = {
			.period = period,
			.weight = weight,
			.nr_events = 1,
		},
		.cpu = al->cpu,
		.ip = al->addr,
		.level = al->level,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists = self,
		.mem_info = mi,
		.branch_info = NULL,
	};
	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period,
					     u64 weight)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.ms = {
			.map = bi->to.map,
			.sym = bi->to.sym,
		},
		.cpu = al->cpu,
		.ip = bi->to.addr,
		.level = al->level,
		.stat = {
			.period = period,
			.nr_events = 1,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
		.hists = self,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period,
				      u64 weight)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.cpu = al->cpu,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.period = period,
			.nr_events = 1,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists = self,
		.branch_info = NULL,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

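/* Compare two entries using every sort key configured by the user. */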
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

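/*
 * Hand out the rbtree that was being filled and rotate entries_in to
 * the other tree in entries_in_array, so new samples can keep being
 * inserted while the returned tree is collapsed.
 */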
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries
			 * already collapsed, we need to apply the filters
			 * that may have been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

void hists__collapse_resort(struct hists *hists)
{
	__hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
	__hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
	if (period_a > period_b)
		return 1;
	if (period_a < period_b)
		return -1;
	return 0;
}

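/*
 * Order two entries by period. When event groups are enabled, entries
 * with equal leader periods are further compared member by member, in
 * group index order, using each entry's paired entries.
 */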
static int hist_entry__sort_on_period(struct hist_entry *a,
				      struct hist_entry *b)
{
	int ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *periods_a, *periods_b;

	ret = period_cmp(a->stat.period, b->stat.period);
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	nr_members = evsel->nr_members;
	if (nr_members <= 1)
		return ret;

	/*
	 * sizeof(*periods_a), not sizeof(periods_a): we need room for
	 * nr_members u64 values, not nr_members pointers.
	 */
	periods_a = zalloc(sizeof(*periods_a) * nr_members);
	periods_b = zalloc(sizeof(*periods_b) * nr_members);

	if (!periods_a || !periods_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	for (i = 1; i < nr_members; i++) {
		ret = period_cmp(periods_a[i], periods_b[i]);
		if (ret)
			break;
	}

out:
	free(periods_a);
	free(periods_b);

	return ret;
}

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

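/*
 * Rebuild hists->entries (the tree the UIs display) from the collapsed
 * tree, ordered by period, recomputing nr_entries, total_period and the
 * column widths from scratch.
 */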
static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period *
			     (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

void hists__output_resort(struct hists *hists)
{
	__hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
	__hists__output_resort(hists, true);
}

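/*
 * Clear @filter on @h and, if the entry is now completely unfiltered,
 * fold it back into the entry count, total period and column widths.
 */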
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

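/*
 * Insert a zeroed copy of @pair into @hists so that hists__link() can
 * pair it up; the dummy keeps the sort position but contributes no
 * period or events of its own.
 */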
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the
 * leader; if we find them, add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}