]>
Commit | Line | Data |
---|---|---|
78f7defe | 1 | #include "annotate.h" |
8a0ecfb8 | 2 | #include "util.h" |
598357eb | 3 | #include "build-id.h" |
3d1d07ec | 4 | #include "hist.h" |
4e4f06e4 ACM |
5 | #include "session.h" |
6 | #include "sort.h" | |
29d720ed | 7 | #include "evsel.h" |
9b33827d | 8 | #include <math.h> |
3d1d07ec | 9 | |
90cf1fb5 ACM |
10 | static bool hists__filter_entry_by_dso(struct hists *hists, |
11 | struct hist_entry *he); | |
12 | static bool hists__filter_entry_by_thread(struct hists *hists, | |
13 | struct hist_entry *he); | |
e94d53eb NK |
14 | static bool hists__filter_entry_by_symbol(struct hists *hists, |
15 | struct hist_entry *he); | |
90cf1fb5 | 16 | |
7a007ca9 ACM |
17 | enum hist_filter { |
18 | HIST_FILTER__DSO, | |
19 | HIST_FILTER__THREAD, | |
20 | HIST_FILTER__PARENT, | |
e94d53eb | 21 | HIST_FILTER__SYMBOL, |
7a007ca9 ACM |
22 | }; |
23 | ||
3d1d07ec JK |
24 | struct callchain_param callchain_param = { |
25 | .mode = CHAIN_GRAPH_REL, | |
d797fdc5 SL |
26 | .min_percent = 0.5, |
27 | .order = ORDER_CALLEE | |
3d1d07ec JK |
28 | }; |
29 | ||
/* Return the cached display width (in characters) of column @col. */
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}
34 | ||
/* Unconditionally set the display width of column @col to @len. */
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
39 | ||
42b28ac0 | 40 | bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len) |
8a6c5b26 | 41 | { |
42b28ac0 ACM |
42 | if (len > hists__col_len(hists, col)) { |
43 | hists__set_col_len(hists, col, len); | |
8a6c5b26 ACM |
44 | return true; |
45 | } | |
46 | return false; | |
47 | } | |
48 | ||
/* Zero every column width so they can be recomputed from scratch. */
void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
56 | ||
/*
 * An unresolved DSO is printed as a raw hex address (BITS_PER_LONG/4
 * nibbles); widen column @dso to fit that, unless the user pinned the
 * column widths, set a field separator, or is filtering by dso.
 */
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
66 | ||
7ccf4f90 | 67 | void hists__calc_col_len(struct hists *hists, struct hist_entry *h) |
8a6c5b26 | 68 | { |
b5387528 | 69 | const unsigned int unresolved_col_width = BITS_PER_LONG / 4; |
8a6c5b26 ACM |
70 | u16 len; |
71 | ||
72 | if (h->ms.sym) | |
b5387528 RAV |
73 | hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4); |
74 | else | |
75 | hists__set_unres_dso_col_len(hists, HISTC_DSO); | |
8a6c5b26 ACM |
76 | |
77 | len = thread__comm_len(h->thread); | |
42b28ac0 ACM |
78 | if (hists__new_col_len(hists, HISTC_COMM, len)) |
79 | hists__set_col_len(hists, HISTC_THREAD, len + 6); | |
8a6c5b26 ACM |
80 | |
81 | if (h->ms.map) { | |
82 | len = dso__name_len(h->ms.map->dso); | |
42b28ac0 | 83 | hists__new_col_len(hists, HISTC_DSO, len); |
8a6c5b26 | 84 | } |
b5387528 | 85 | |
cb993744 NK |
86 | if (h->parent) |
87 | hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen); | |
88 | ||
b5387528 RAV |
89 | if (h->branch_info) { |
90 | int symlen; | |
91 | /* | |
92 | * +4 accounts for '[x] ' priv level info | |
93 | * +2 account of 0x prefix on raw addresses | |
94 | */ | |
95 | if (h->branch_info->from.sym) { | |
96 | symlen = (int)h->branch_info->from.sym->namelen + 4; | |
97 | hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); | |
98 | ||
99 | symlen = dso__name_len(h->branch_info->from.map->dso); | |
100 | hists__new_col_len(hists, HISTC_DSO_FROM, symlen); | |
101 | } else { | |
102 | symlen = unresolved_col_width + 4 + 2; | |
103 | hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); | |
104 | hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM); | |
105 | } | |
106 | ||
107 | if (h->branch_info->to.sym) { | |
108 | symlen = (int)h->branch_info->to.sym->namelen + 4; | |
109 | hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); | |
110 | ||
111 | symlen = dso__name_len(h->branch_info->to.map->dso); | |
112 | hists__new_col_len(hists, HISTC_DSO_TO, symlen); | |
113 | } else { | |
114 | symlen = unresolved_col_width + 4 + 2; | |
115 | hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); | |
116 | hists__set_unres_dso_col_len(hists, HISTC_DSO_TO); | |
117 | } | |
118 | } | |
8a6c5b26 ACM |
119 | } |
120 | ||
7ccf4f90 NK |
121 | void hists__output_recalc_col_len(struct hists *hists, int max_rows) |
122 | { | |
123 | struct rb_node *next = rb_first(&hists->entries); | |
124 | struct hist_entry *n; | |
125 | int row = 0; | |
126 | ||
127 | hists__reset_col_len(hists); | |
128 | ||
129 | while (next && row++ < max_rows) { | |
130 | n = rb_entry(next, struct hist_entry, rb_node); | |
131 | if (!n->filtered) | |
132 | hists__calc_col_len(hists, n); | |
133 | next = rb_next(&n->rb_node); | |
134 | } | |
135 | } | |
136 | ||
/*
 * Attribute @period to the per-cpumode bucket (kernel/user and their
 * guest counterparts) matching the sample's @cpumode. Unknown modes
 * are silently ignored.
 */
static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}
157 | ||
05484298 AK |
158 | static void he_stat__add_period(struct he_stat *he_stat, u64 period, |
159 | u64 weight) | |
139c0815 NK |
160 | { |
161 | he_stat->period += period; | |
05484298 | 162 | he_stat->weight += weight; |
139c0815 NK |
163 | he_stat->nr_events += 1; |
164 | } | |
165 | ||
/* Accumulate every counter of @src into @dest (used when collapsing). */
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}
176 | ||
/* Exponentially decay an entry: keep 7/8 of its period and event count. */
static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
183 | ||
/*
 * Decay one entry, keeping hists->stats.total_period in sync for
 * unfiltered entries. Returns true when the entry has fully decayed
 * (period reached zero) and may be removed by the caller.
 */
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	/* Filtered entries were never accounted in total_period. */
	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}
198 | ||
/*
 * Walk all output entries, decaying each; entries that fully decay (or
 * match the zap_user/zap_kernel request) and are not in use get erased
 * from the rbtree(s) and freed.
 */
static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			/* Collapsed tree holds a second link to the entry. */
			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}
227 | ||
b079d4e9 | 228 | void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel) |
e345fa18 | 229 | { |
b079d4e9 | 230 | return __hists__decay_entries(hists, zap_user, zap_kernel, false); |
e345fa18 ACM |
231 | } |
232 | ||
b079d4e9 ACM |
233 | void hists__decay_entries_threaded(struct hists *hists, |
234 | bool zap_user, bool zap_kernel) | |
e345fa18 | 235 | { |
b079d4e9 | 236 | return __hists__decay_entries(hists, zap_user, zap_kernel, true); |
e345fa18 ACM |
237 | } |
238 | ||
3d1d07ec | 239 | /* |
c82ee828 | 240 | * histogram, sorted on item, collects periods |
3d1d07ec JK |
241 | */ |
242 | ||
/*
 * Allocate a new hist_entry as a copy of @template. When callchains are
 * enabled the callchain root is co-allocated right behind the entry
 * (he->callchain is a flexible tail). Returns NULL on allocation failure.
 */
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		/* Mark the referenced maps so they are not discarded. */
		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}
269 | ||
/*
 * Account a (re)inserted entry: bump nr_entries and total_period and
 * grow column widths — but only for entries not hidden by a filter.
 */
void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}
278 | ||
7a007ca9 ACM |
279 | static u8 symbol__parent_filter(const struct symbol *parent) |
280 | { | |
281 | if (symbol_conf.exclude_other && parent == NULL) | |
282 | return 1 << HIST_FILTER__PARENT; | |
283 | return 0; | |
284 | } | |
285 | ||
/*
 * Insert @entry into the current input tree, or merge its period into
 * an existing bucket that compares equal. Returns the (new or found)
 * entry, or NULL if allocation failed. Takes hists->lock because
 * entries_in may be rotated concurrently by the collapse machinery.
 */
static struct hist_entry *add_hist_entry(struct hists *hists,
				      struct hist_entry *entry,
				      struct addr_location *al,
				      u64 period,
				      u64 weight)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			/* Existing bucket: just accumulate the sample. */
			he_stat__add_period(&he->stat, period, weight);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	/* Also split the period into the per-cpumode buckets. */
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}
348 | ||
/*
 * Add a branch sample: the entry is keyed on the branch *target*
 * (bi->to), with @bi attached for the from/to columns. Delegates the
 * actual insert/merge to add_hist_entry().
 */
struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period,
					     u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= bi->to.map,
			.sym	= bi->to.sym,
		},
		.cpu	= al->cpu,
		.ip	= bi->to.addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
		.hists	= self,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}
378 | ||
/*
 * Add an ordinary (non-branch) sample resolved at @al. Builds an
 * on-stack template and delegates to add_hist_entry() for insert/merge.
 */
struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period,
				      u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists	= self,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}
405 | ||
/*
 * Compare two entries with the configured sort keys; the first key
 * that differs decides the order. Returns 0 when all keys match.
 */
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
420 | ||
/*
 * Like hist_entry__cmp(), but each sort key may supply a dedicated
 * collapse comparator (se_collapse); fall back to se_cmp otherwise.
 */
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
439 | ||
/* Free an entry; the entry owns its branch_info (may be NULL). */
void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he);
}
445 | ||
446 | /* | |
447 | * collapse the histogram | |
448 | */ | |
449 | ||
/*
 * Insert @he into the collapsed tree @root. If an equal entry already
 * exists, merge @he's stats (and callchain) into it and free @he.
 * Returns true when @he was inserted, false when it was merged away.
 */
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			/* @he has been folded into @iter, drop it. */
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
488 | ||
/*
 * Hand the current input tree to the caller and flip new insertions to
 * the other element of the two-slot entries_in_array (double buffer),
 * so samples can keep arriving while the returned tree is collapsed.
 */
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
503 | ||
/* Re-evaluate every active filter (dso/thread/symbol) against @he. */
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
510 | ||
/*
 * Drain the current input tree into entries_collapsed, merging entries
 * that become equal under the collapse comparators. A no-op when no
 * sort key needs collapsing and no collapse thread is running.
 */
static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}
b9bf0892 | 538 | |
1980c2eb ACM |
539 | void hists__collapse_resort(struct hists *hists) |
540 | { | |
541 | return __hists__collapse_resort(hists, false); | |
542 | } | |
543 | ||
544 | void hists__collapse_resort_threaded(struct hists *hists) | |
545 | { | |
546 | return __hists__collapse_resort(hists, true); | |
3d1d07ec JK |
547 | } |
548 | ||
549 | /* | |
c82ee828 | 550 | * reverse the map, sort on period. |
3d1d07ec JK |
551 | */ |
552 | ||
29d720ed NK |
553 | static int period_cmp(u64 period_a, u64 period_b) |
554 | { | |
555 | if (period_a > period_b) | |
556 | return 1; | |
557 | if (period_a < period_b) | |
558 | return -1; | |
559 | return 0; | |
560 | } | |
561 | ||
562 | static int hist_entry__sort_on_period(struct hist_entry *a, | |
563 | struct hist_entry *b) | |
564 | { | |
565 | int ret; | |
566 | int i, nr_members; | |
567 | struct perf_evsel *evsel; | |
568 | struct hist_entry *pair; | |
569 | u64 *periods_a, *periods_b; | |
570 | ||
571 | ret = period_cmp(a->stat.period, b->stat.period); | |
572 | if (ret || !symbol_conf.event_group) | |
573 | return ret; | |
574 | ||
575 | evsel = hists_to_evsel(a->hists); | |
576 | nr_members = evsel->nr_members; | |
577 | if (nr_members <= 1) | |
578 | return ret; | |
579 | ||
580 | periods_a = zalloc(sizeof(periods_a) * nr_members); | |
581 | periods_b = zalloc(sizeof(periods_b) * nr_members); | |
582 | ||
583 | if (!periods_a || !periods_b) | |
584 | goto out; | |
585 | ||
586 | list_for_each_entry(pair, &a->pairs.head, pairs.node) { | |
587 | evsel = hists_to_evsel(pair->hists); | |
588 | periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period; | |
589 | } | |
590 | ||
591 | list_for_each_entry(pair, &b->pairs.head, pairs.node) { | |
592 | evsel = hists_to_evsel(pair->hists); | |
593 | periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period; | |
594 | } | |
595 | ||
596 | for (i = 1; i < nr_members; i++) { | |
597 | ret = period_cmp(periods_a[i], periods_b[i]); | |
598 | if (ret) | |
599 | break; | |
600 | } | |
601 | ||
602 | out: | |
603 | free(periods_a); | |
604 | free(periods_b); | |
605 | ||
606 | return ret; | |
607 | } | |
608 | ||
/*
 * Insert @he into the output tree @entries, ordered by period
 * (larger periods toward the left, i.e. displayed first). Also prunes
 * and sorts the entry's callchain when callchains are enabled.
 */
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
634 | ||
/*
 * Rebuild the display tree (hists->entries) from the collapsed (or
 * input) tree, sorted by period. Totals and column widths are reset
 * and recomputed as each entry is re-inserted.
 */
static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	/* Callchains below this share of the total period get pruned. */
	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}
b9bf0892 | 664 | |
1980c2eb ACM |
665 | void hists__output_resort(struct hists *hists) |
666 | { | |
667 | return __hists__output_resort(hists, false); | |
668 | } | |
669 | ||
670 | void hists__output_resort_threaded(struct hists *hists) | |
671 | { | |
672 | return __hists__output_resort(hists, true); | |
3d1d07ec | 673 | } |
4ecf84d0 | 674 | |
/*
 * Clear one filter bit on @h; if no other filter still hides it,
 * account the entry back into the visible totals and column widths.
 */
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	/* Still hidden by another filter: keep it unaccounted. */
	if (h->filtered)
		return;

	++hists->nr_entries;
	/* An unfolded entry also shows its callchain rows. */
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}
691 | ||
90cf1fb5 ACM |
692 | |
693 | static bool hists__filter_entry_by_dso(struct hists *hists, | |
694 | struct hist_entry *he) | |
695 | { | |
696 | if (hists->dso_filter != NULL && | |
697 | (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) { | |
698 | he->filtered |= (1 << HIST_FILTER__DSO); | |
699 | return true; | |
700 | } | |
701 | ||
702 | return false; | |
703 | } | |
704 | ||
/*
 * Re-apply the dso filter to every output entry, rebuilding the
 * visible-entry counters and column widths from scratch.
 */
void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		/* --exclude-other: parentless entries stay hidden. */
		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}
725 | ||
90cf1fb5 ACM |
726 | static bool hists__filter_entry_by_thread(struct hists *hists, |
727 | struct hist_entry *he) | |
728 | { | |
729 | if (hists->thread_filter != NULL && | |
730 | he->thread != hists->thread_filter) { | |
731 | he->filtered |= (1 << HIST_FILTER__THREAD); | |
732 | return true; | |
733 | } | |
734 | ||
735 | return false; | |
736 | } | |
737 | ||
d7b76f09 | 738 | void hists__filter_by_thread(struct hists *hists) |
b09e0190 ACM |
739 | { |
740 | struct rb_node *nd; | |
741 | ||
42b28ac0 ACM |
742 | hists->nr_entries = hists->stats.total_period = 0; |
743 | hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; | |
744 | hists__reset_col_len(hists); | |
b09e0190 | 745 | |
42b28ac0 | 746 | for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { |
b09e0190 ACM |
747 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); |
748 | ||
90cf1fb5 | 749 | if (hists__filter_entry_by_thread(hists, h)) |
b09e0190 | 750 | continue; |
cc5edb0e | 751 | |
42b28ac0 | 752 | hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD); |
b09e0190 ACM |
753 | } |
754 | } | |
ef7b93a1 | 755 | |
e94d53eb NK |
756 | static bool hists__filter_entry_by_symbol(struct hists *hists, |
757 | struct hist_entry *he) | |
758 | { | |
759 | if (hists->symbol_filter_str != NULL && | |
760 | (!he->ms.sym || strstr(he->ms.sym->name, | |
761 | hists->symbol_filter_str) == NULL)) { | |
762 | he->filtered |= (1 << HIST_FILTER__SYMBOL); | |
763 | return true; | |
764 | } | |
765 | ||
766 | return false; | |
767 | } | |
768 | ||
769 | void hists__filter_by_symbol(struct hists *hists) | |
770 | { | |
771 | struct rb_node *nd; | |
772 | ||
773 | hists->nr_entries = hists->stats.total_period = 0; | |
774 | hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; | |
775 | hists__reset_col_len(hists); | |
776 | ||
777 | for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { | |
778 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); | |
779 | ||
780 | if (hists__filter_entry_by_symbol(hists, h)) | |
781 | continue; | |
782 | ||
783 | hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL); | |
784 | } | |
785 | } | |
786 | ||
/* Record one sample at @ip for event @evidx against this entry's symbol. */
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}
791 | ||
/* Annotate this entry's symbol; @privsize is passed through to symbol__annotate(). */
int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}
c8446b9b | 796 | |
/* Bump the counter for @type; slot 0 accumulates the overall total. */
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}
802 | ||
/* Bump the per-type and total event counters of these hists. */
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}
95529be4 | 807 | |
/*
 * Ensure @hists contains an entry comparing equal to @pair: return the
 * existing one, or insert a zero-stat "dummy" copy that serves only as
 * a list header for pairing. Returns NULL on allocation failure.
 */
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		/* Already present: no dummy needed. */
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		/* The dummy carries no samples of its own. */
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
	}
out:
	return he;
}
850 | ||
95529be4 ACM |
851 | static struct hist_entry *hists__find_entry(struct hists *hists, |
852 | struct hist_entry *he) | |
853 | { | |
ce74f60e NK |
854 | struct rb_node *n; |
855 | ||
856 | if (sort__need_collapse) | |
857 | n = hists->entries_collapsed.rb_node; | |
858 | else | |
859 | n = hists->entries_in->rb_node; | |
95529be4 ACM |
860 | |
861 | while (n) { | |
ce74f60e NK |
862 | struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in); |
863 | int64_t cmp = hist_entry__collapse(iter, he); | |
95529be4 ACM |
864 | |
865 | if (cmp < 0) | |
866 | n = n->rb_left; | |
867 | else if (cmp > 0) | |
868 | n = n->rb_right; | |
869 | else | |
870 | return iter; | |
871 | } | |
872 | ||
873 | return NULL; | |
874 | } | |
875 | ||
876 | /* | |
877 | * Look for pairs to link to the leader buckets (hist_entries): | |
878 | */ | |
879 | void hists__match(struct hists *leader, struct hists *other) | |
880 | { | |
ce74f60e | 881 | struct rb_root *root; |
95529be4 ACM |
882 | struct rb_node *nd; |
883 | struct hist_entry *pos, *pair; | |
884 | ||
ce74f60e NK |
885 | if (sort__need_collapse) |
886 | root = &leader->entries_collapsed; | |
887 | else | |
888 | root = leader->entries_in; | |
889 | ||
890 | for (nd = rb_first(root); nd; nd = rb_next(nd)) { | |
891 | pos = rb_entry(nd, struct hist_entry, rb_node_in); | |
95529be4 ACM |
892 | pair = hists__find_entry(other, pos); |
893 | ||
894 | if (pair) | |
5fa9041b | 895 | hist_entry__add_pair(pair, pos); |
95529be4 ACM |
896 | } |
897 | } | |
494d70a1 ACM |
898 | |
899 | /* | |
900 | * Look for entries in the other hists that are not present in the leader, if | |
901 | * we find them, just add a dummy entry on the leader hists, with period=0, | |
902 | * nr_events=0, to serve as the list header. | |
903 | */ | |
904 | int hists__link(struct hists *leader, struct hists *other) | |
905 | { | |
ce74f60e | 906 | struct rb_root *root; |
494d70a1 ACM |
907 | struct rb_node *nd; |
908 | struct hist_entry *pos, *pair; | |
909 | ||
ce74f60e NK |
910 | if (sort__need_collapse) |
911 | root = &other->entries_collapsed; | |
912 | else | |
913 | root = other->entries_in; | |
914 | ||
915 | for (nd = rb_first(root); nd; nd = rb_next(nd)) { | |
916 | pos = rb_entry(nd, struct hist_entry, rb_node_in); | |
494d70a1 ACM |
917 | |
918 | if (!hist_entry__has_pairs(pos)) { | |
919 | pair = hists__add_dummy_entry(leader, pos); | |
920 | if (pair == NULL) | |
921 | return -1; | |
5fa9041b | 922 | hist_entry__add_pair(pos, pair); |
494d70a1 ACM |
923 | } |
924 | } | |
925 | ||
926 | return 0; | |
927 | } |