1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com>
4 *
5 * Handle the callchains from the stream in an ad-hoc radix tree and then
6 * sort them in an rbtree.
7 *
8  * Using a radix tree for the code paths provides fast retrieval and factorizes
9  * memory use. It also lets us use the paths in a hierarchical graph view.
10 *
11 */
12
13 #include <inttypes.h>
14 #include <stdlib.h>
15 #include <stdio.h>
16 #include <stdbool.h>
17 #include <errno.h>
18 #include <math.h>
19
20 #include "asm/bug.h"
21
22 #include "hist.h"
23 #include "util.h"
24 #include "sort.h"
25 #include "machine.h"
26 #include "callchain.h"
27 #include "branch.h"
28
29 #define CALLCHAIN_PARAM_DEFAULT \
30 .mode = CHAIN_GRAPH_ABS, \
31 .min_percent = 0.5, \
32 .order = ORDER_CALLEE, \
33 .key = CCKEY_FUNCTION, \
34 .value = CCVAL_PERCENT, \
35
36 struct callchain_param callchain_param = {
37 CALLCHAIN_PARAM_DEFAULT
38 };
39
40 struct callchain_param callchain_param_default = {
41 CALLCHAIN_PARAM_DEFAULT
42 };
43
44 __thread struct callchain_cursor callchain_cursor;
45
46 int parse_callchain_record_opt(const char *arg, struct callchain_param *param)
47 {
48 return parse_callchain_record(arg, param);
49 }
50
51 static int parse_callchain_mode(const char *value)
52 {
53 if (!strncmp(value, "graph", strlen(value))) {
54 callchain_param.mode = CHAIN_GRAPH_ABS;
55 return 0;
56 }
57 if (!strncmp(value, "flat", strlen(value))) {
58 callchain_param.mode = CHAIN_FLAT;
59 return 0;
60 }
61 if (!strncmp(value, "fractal", strlen(value))) {
62 callchain_param.mode = CHAIN_GRAPH_REL;
63 return 0;
64 }
65 if (!strncmp(value, "folded", strlen(value))) {
66 callchain_param.mode = CHAIN_FOLDED;
67 return 0;
68 }
69 return -1;
70 }
71
72 static int parse_callchain_order(const char *value)
73 {
74 if (!strncmp(value, "caller", strlen(value))) {
75 callchain_param.order = ORDER_CALLER;
76 callchain_param.order_set = true;
77 return 0;
78 }
79 if (!strncmp(value, "callee", strlen(value))) {
80 callchain_param.order = ORDER_CALLEE;
81 callchain_param.order_set = true;
82 return 0;
83 }
84 return -1;
85 }
86
87 static int parse_callchain_sort_key(const char *value)
88 {
89 if (!strncmp(value, "function", strlen(value))) {
90 callchain_param.key = CCKEY_FUNCTION;
91 return 0;
92 }
93 if (!strncmp(value, "address", strlen(value))) {
94 callchain_param.key = CCKEY_ADDRESS;
95 return 0;
96 }
97 if (!strncmp(value, "srcline", strlen(value))) {
98 callchain_param.key = CCKEY_SRCLINE;
99 return 0;
100 }
101 if (!strncmp(value, "branch", strlen(value))) {
102 callchain_param.branch_callstack = 1;
103 return 0;
104 }
105 return -1;
106 }
107
108 static int parse_callchain_value(const char *value)
109 {
110 if (!strncmp(value, "percent", strlen(value))) {
111 callchain_param.value = CCVAL_PERCENT;
112 return 0;
113 }
114 if (!strncmp(value, "period", strlen(value))) {
115 callchain_param.value = CCVAL_PERIOD;
116 return 0;
117 }
118 if (!strncmp(value, "count", strlen(value))) {
119 callchain_param.value = CCVAL_COUNT;
120 return 0;
121 }
122 return -1;
123 }
124
125 static int get_stack_size(const char *str, unsigned long *_size)
126 {
127 char *endptr;
128 unsigned long size;
129 unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
130
131 size = strtoul(str, &endptr, 0);
132
133 do {
134 if (*endptr)
135 break;
136
137 size = round_up(size, sizeof(u64));
138 if (!size || size > max_size)
139 break;
140
141 *_size = size;
142 return 0;
143
144 } while (0);
145
146 pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
147 max_size, str);
148 return -1;
149 }
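
/*
 * Worked examples (illustrative, not part of the original file; they assume
 * sizeof(u64) == 8, so max_size == round_down(USHRT_MAX, 8) == 65528):
 *
 *   get_stack_size("8192", &sz)  -> sz = 8192, returns 0
 *   get_stack_size("8190", &sz)  -> sz = 8192 (rounded up to a u64 multiple)
 *   get_stack_size("70000", &sz) -> error, 70000 exceeds 65528
 *   get_stack_size("8k", &sz)    -> error, trailing non-digit characters
 */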
150
151 static int
152 __parse_callchain_report_opt(const char *arg, bool allow_record_opt)
153 {
154 char *tok;
155 char *endptr, *saveptr = NULL;
156 bool minpcnt_set = false;
157 bool record_opt_set = false;
158 bool try_stack_size = false;
159
160 callchain_param.enabled = true;
161 symbol_conf.use_callchain = true;
162
163 if (!arg)
164 return 0;
165
166 while ((tok = strtok_r((char *)arg, ",", &saveptr)) != NULL) {
167 if (!strncmp(tok, "none", strlen(tok))) {
168 callchain_param.mode = CHAIN_NONE;
169 callchain_param.enabled = false;
170 symbol_conf.use_callchain = false;
171 return 0;
172 }
173
174 if (!parse_callchain_mode(tok) ||
175 !parse_callchain_order(tok) ||
176 !parse_callchain_sort_key(tok) ||
177 !parse_callchain_value(tok)) {
178 /* parsing ok - move on to the next */
179 try_stack_size = false;
180 goto next;
181 } else if (allow_record_opt && !record_opt_set) {
182 if (parse_callchain_record(tok, &callchain_param))
183 goto try_numbers;
184
185 /* assume that a number following 'dwarf' is the stack size */
186 if (callchain_param.record_mode == CALLCHAIN_DWARF)
187 try_stack_size = true;
188
189 record_opt_set = true;
190 goto next;
191 }
192
193 try_numbers:
194 if (try_stack_size) {
195 unsigned long size = 0;
196
197 if (get_stack_size(tok, &size) < 0)
198 return -1;
199 callchain_param.dump_size = size;
200 try_stack_size = false;
201 } else if (!minpcnt_set) {
202 /* try to get the min percent */
203 callchain_param.min_percent = strtod(tok, &endptr);
204 if (tok == endptr)
205 return -1;
206 minpcnt_set = true;
207 } else {
208 /* try print limit at last */
209 callchain_param.print_limit = strtoul(tok, &endptr, 0);
210 if (tok == endptr)
211 return -1;
212 }
213 next:
214 arg = NULL;
215 }
216
217 if (callchain_register_param(&callchain_param) < 0) {
218 pr_err("Can't register callchain params\n");
219 return -1;
220 }
221 return 0;
222 }
223
224 int parse_callchain_report_opt(const char *arg)
225 {
226 return __parse_callchain_report_opt(arg, false);
227 }
228
229 int parse_callchain_top_opt(const char *arg)
230 {
231 return __parse_callchain_report_opt(arg, true);
232 }
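
/*
 * Illustrative report/top option strings handled above (the values are
 * hypothetical examples, not taken from the original file):
 *
 *   "graph,0.5,caller"   - absolute graph, 0.5% threshold, caller order
 *   "fractal,2"          - relative graph, 2% threshold
 *   "none"               - disable callchain output entirely
 *   "dwarf,4096,1"       - only with allow_record_opt (perf top): DWARF
 *                          record mode, 4096-byte dumps, then 1% threshold
 */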
233
234 int parse_callchain_record(const char *arg, struct callchain_param *param)
235 {
236 char *tok, *name, *saveptr = NULL;
237 char *buf;
238 int ret = -1;
239
240 /* We need a buffer that we know we can write to. */
241 buf = malloc(strlen(arg) + 1);
242 if (!buf)
243 return -ENOMEM;
244
245 strcpy(buf, arg);
246
247 tok = strtok_r((char *)buf, ",", &saveptr);
248 name = tok ? : (char *)buf;
249
250 do {
251 /* Framepointer style */
252 if (!strncmp(name, "fp", sizeof("fp"))) {
253 if (!strtok_r(NULL, ",", &saveptr)) {
254 param->record_mode = CALLCHAIN_FP;
255 ret = 0;
256 } else
257 pr_err("callchain: No more arguments "
258 "needed for --call-graph fp\n");
259 break;
260
261 /* Dwarf style */
262 } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
263 const unsigned long default_stack_dump_size = 8192;
264
265 ret = 0;
266 param->record_mode = CALLCHAIN_DWARF;
267 param->dump_size = default_stack_dump_size;
268
269 tok = strtok_r(NULL, ",", &saveptr);
270 if (tok) {
271 unsigned long size = 0;
272
273 ret = get_stack_size(tok, &size);
274 param->dump_size = size;
275 }
276 } else if (!strncmp(name, "lbr", sizeof("lbr"))) {
277 if (!strtok_r(NULL, ",", &saveptr)) {
278 param->record_mode = CALLCHAIN_LBR;
279 ret = 0;
280 } else
281 pr_err("callchain: No more arguments "
282 "needed for --call-graph lbr\n");
283 break;
284 } else {
285 pr_err("callchain: Unknown --call-graph option "
286 "value: %s\n", arg);
287 break;
288 }
289
290 } while (0);
291
292 free(buf);
293 return ret;
294 }
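
/*
 * Illustrative --call-graph record values accepted above:
 *
 *   "fp"          - frame-pointer based unwinding, no extra argument
 *   "dwarf"       - DWARF unwinding with the default 8192-byte stack dump
 *   "dwarf,4096"  - DWARF unwinding with a 4096-byte stack dump
 *   "lbr"         - last-branch-record assisted callchains
 */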
295
296 int perf_callchain_config(const char *var, const char *value)
297 {
298 char *endptr;
299
300 if (!strstarts(var, "call-graph."))
301 return 0;
302 var += sizeof("call-graph.") - 1;
303
304 if (!strcmp(var, "record-mode"))
305 return parse_callchain_record_opt(value, &callchain_param);
306 if (!strcmp(var, "dump-size")) {
307 unsigned long size = 0;
308 int ret;
309
310 ret = get_stack_size(value, &size);
311 callchain_param.dump_size = size;
312
313 return ret;
314 }
315 if (!strcmp(var, "print-type")){
316 int ret;
317 ret = parse_callchain_mode(value);
318 if (ret == -1)
319 pr_err("Invalid callchain mode: %s\n", value);
320 return ret;
321 }
322 if (!strcmp(var, "order")){
323 int ret;
324 ret = parse_callchain_order(value);
325 if (ret == -1)
326 pr_err("Invalid callchain order: %s\n", value);
327 return ret;
328 }
329 if (!strcmp(var, "sort-key")){
330 int ret;
331 ret = parse_callchain_sort_key(value);
332 if (ret == -1)
333 pr_err("Invalid callchain sort key: %s\n", value);
334 return ret;
335 }
336 if (!strcmp(var, "threshold")) {
337 callchain_param.min_percent = strtod(value, &endptr);
338 if (value == endptr) {
339 pr_err("Invalid callchain threshold: %s\n", value);
340 return -1;
341 }
342 }
343 if (!strcmp(var, "print-limit")) {
344 callchain_param.print_limit = strtod(value, &endptr);
345 if (value == endptr) {
346 pr_err("Invalid callchain print limit: %s\n", value);
347 return -1;
348 }
349 }
350
351 return 0;
352 }
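
/*
 * A minimal ~/.perfconfig sketch exercising the keys parsed above (the
 * values are illustrative only):
 *
 *   [call-graph]
 *           record-mode = dwarf
 *           dump-size = 4096
 *           print-type = graph
 *           order = caller
 *           sort-key = function
 *           threshold = 0.5
 *           print-limit = 128
 */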
353
354 static void
355 rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
356 enum chain_mode mode)
357 {
358 struct rb_node **p = &root->rb_node;
359 struct rb_node *parent = NULL;
360 struct callchain_node *rnode;
361 u64 chain_cumul = callchain_cumul_hits(chain);
362
363 while (*p) {
364 u64 rnode_cumul;
365
366 parent = *p;
367 rnode = rb_entry(parent, struct callchain_node, rb_node);
368 rnode_cumul = callchain_cumul_hits(rnode);
369
370 switch (mode) {
371 case CHAIN_FLAT:
372 case CHAIN_FOLDED:
373 if (rnode->hit < chain->hit)
374 p = &(*p)->rb_left;
375 else
376 p = &(*p)->rb_right;
377 break;
378 case CHAIN_GRAPH_ABS: /* Fall through */
379 case CHAIN_GRAPH_REL:
380 if (rnode_cumul < chain_cumul)
381 p = &(*p)->rb_left;
382 else
383 p = &(*p)->rb_right;
384 break;
385 case CHAIN_NONE:
386 default:
387 break;
388 }
389 }
390
391 rb_link_node(&chain->rb_node, parent, p);
392 rb_insert_color(&chain->rb_node, root);
393 }
394
395 static void
396 __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
397 u64 min_hit)
398 {
399 struct rb_node *n;
400 struct callchain_node *child;
401
402 n = rb_first(&node->rb_root_in);
403 while (n) {
404 child = rb_entry(n, struct callchain_node, rb_node_in);
405 n = rb_next(n);
406
407 __sort_chain_flat(rb_root, child, min_hit);
408 }
409
410 if (node->hit && node->hit >= min_hit)
411 rb_insert_callchain(rb_root, node, CHAIN_FLAT);
412 }
413
414 /*
415  * Once we have collected all the callchains from the stream, we can now
416  * sort them by hit
417 */
418 static void
419 sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
420 u64 min_hit, struct callchain_param *param __maybe_unused)
421 {
422 *rb_root = RB_ROOT;
423 __sort_chain_flat(rb_root, &root->node, min_hit);
424 }
425
426 static void __sort_chain_graph_abs(struct callchain_node *node,
427 u64 min_hit)
428 {
429 struct rb_node *n;
430 struct callchain_node *child;
431
432 node->rb_root = RB_ROOT;
433 n = rb_first(&node->rb_root_in);
434
435 while (n) {
436 child = rb_entry(n, struct callchain_node, rb_node_in);
437 n = rb_next(n);
438
439 __sort_chain_graph_abs(child, min_hit);
440 if (callchain_cumul_hits(child) >= min_hit)
441 rb_insert_callchain(&node->rb_root, child,
442 CHAIN_GRAPH_ABS);
443 }
444 }
445
446 static void
447 sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
448 u64 min_hit, struct callchain_param *param __maybe_unused)
449 {
450 __sort_chain_graph_abs(&chain_root->node, min_hit);
451 rb_root->rb_node = chain_root->node.rb_root.rb_node;
452 }
453
454 static void __sort_chain_graph_rel(struct callchain_node *node,
455 double min_percent)
456 {
457 struct rb_node *n;
458 struct callchain_node *child;
459 u64 min_hit;
460
461 node->rb_root = RB_ROOT;
462 min_hit = ceil(node->children_hit * min_percent);
463
464 n = rb_first(&node->rb_root_in);
465 while (n) {
466 child = rb_entry(n, struct callchain_node, rb_node_in);
467 n = rb_next(n);
468
469 __sort_chain_graph_rel(child, min_percent);
470 if (callchain_cumul_hits(child) >= min_hit)
471 rb_insert_callchain(&node->rb_root, child,
472 CHAIN_GRAPH_REL);
473 }
474 }
475
476 static void
477 sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
478 u64 min_hit __maybe_unused, struct callchain_param *param)
479 {
480 __sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
481 rb_root->rb_node = chain_root->node.rb_root.rb_node;
482 }
483
484 int callchain_register_param(struct callchain_param *param)
485 {
486 switch (param->mode) {
487 case CHAIN_GRAPH_ABS:
488 param->sort = sort_chain_graph_abs;
489 break;
490 case CHAIN_GRAPH_REL:
491 param->sort = sort_chain_graph_rel;
492 break;
493 case CHAIN_FLAT:
494 case CHAIN_FOLDED:
495 param->sort = sort_chain_flat;
496 break;
497 case CHAIN_NONE:
498 default:
499 return -1;
500 }
501 return 0;
502 }
503
504 /*
505  * Create a child for a parent. If inherit_children is set, the new child
506  * will become the new parent of its parent's children
507 */
508 static struct callchain_node *
509 create_child(struct callchain_node *parent, bool inherit_children)
510 {
511 struct callchain_node *new;
512
513 new = zalloc(sizeof(*new));
514 if (!new) {
515 perror("not enough memory to create child for code path tree");
516 return NULL;
517 }
518 new->parent = parent;
519 INIT_LIST_HEAD(&new->val);
520 INIT_LIST_HEAD(&new->parent_val);
521
522 if (inherit_children) {
523 struct rb_node *n;
524 struct callchain_node *child;
525
526 new->rb_root_in = parent->rb_root_in;
527 parent->rb_root_in = RB_ROOT;
528
529 n = rb_first(&new->rb_root_in);
530 while (n) {
531 child = rb_entry(n, struct callchain_node, rb_node_in);
532 child->parent = new;
533 n = rb_next(n);
534 }
535
536 /* make it the first child */
537 rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node);
538 rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
539 }
540
541 return new;
542 }
543
544
545 /*
546 * Fill the node with callchain values
547 */
548 static int
549 fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
550 {
551 struct callchain_cursor_node *cursor_node;
552
553 node->val_nr = cursor->nr - cursor->pos;
554 if (!node->val_nr)
555 pr_warning("Warning: empty node in callchain tree\n");
556
557 cursor_node = callchain_cursor_current(cursor);
558
559 while (cursor_node) {
560 struct callchain_list *call;
561
562 call = zalloc(sizeof(*call));
563 if (!call) {
564 perror("not enough memory for the code path tree");
565 return -1;
566 }
567 call->ip = cursor_node->ip;
568 call->ms.sym = cursor_node->sym;
569 call->ms.map = map__get(cursor_node->map);
570
571 if (cursor_node->branch) {
572 call->branch_count = 1;
573
574 if (cursor_node->branch_from) {
575 /*
576  * branch_from is set elsewhere to indicate that this
577  * entry is the "to" side of a branch.
578 */
579 call->brtype_stat.branch_to = true;
580
581 if (cursor_node->branch_flags.predicted)
582 call->predicted_count = 1;
583
584 if (cursor_node->branch_flags.abort)
585 call->abort_count = 1;
586
587 branch_type_count(&call->brtype_stat,
588 &cursor_node->branch_flags,
589 cursor_node->branch_from,
590 cursor_node->ip);
591 } else {
592 /*
593 * It's "from" of a branch
594 */
595 call->brtype_stat.branch_to = false;
596 call->cycles_count =
597 cursor_node->branch_flags.cycles;
598 call->iter_count = cursor_node->nr_loop_iter;
599 call->iter_cycles = cursor_node->iter_cycles;
600 }
601 }
602
603 list_add_tail(&call->list, &node->val);
604
605 callchain_cursor_advance(cursor);
606 cursor_node = callchain_cursor_current(cursor);
607 }
608 return 0;
609 }
610
611 static struct callchain_node *
612 add_child(struct callchain_node *parent,
613 struct callchain_cursor *cursor,
614 u64 period)
615 {
616 struct callchain_node *new;
617
618 new = create_child(parent, false);
619 if (new == NULL)
620 return NULL;
621
622 if (fill_node(new, cursor) < 0) {
623 struct callchain_list *call, *tmp;
624
625 list_for_each_entry_safe(call, tmp, &new->val, list) {
626 list_del(&call->list);
627 map__zput(call->ms.map);
628 free(call);
629 }
630 free(new);
631 return NULL;
632 }
633
634 new->children_hit = 0;
635 new->hit = period;
636 new->children_count = 0;
637 new->count = 1;
638 return new;
639 }
640
641 enum match_result {
642 MATCH_ERROR = -1,
643 MATCH_EQ,
644 MATCH_LT,
645 MATCH_GT,
646 };
647
648 static enum match_result match_chain_srcline(struct callchain_cursor_node *node,
649 struct callchain_list *cnode)
650 {
651 char *left = NULL;
652 char *right = NULL;
653 enum match_result ret = MATCH_EQ;
654 int cmp;
655
656 if (cnode->ms.map)
657 left = get_srcline(cnode->ms.map->dso,
658 map__rip_2objdump(cnode->ms.map, cnode->ip),
659 cnode->ms.sym, true, false);
660 if (node->map)
661 right = get_srcline(node->map->dso,
662 map__rip_2objdump(node->map, node->ip),
663 node->sym, true, false);
664
665 if (left && right)
666 cmp = strcmp(left, right);
667 else if (!left && right)
668 cmp = 1;
669 else if (left && !right)
670 cmp = -1;
671 else if (cnode->ip == node->ip)
672 cmp = 0;
673 else
674 cmp = (cnode->ip < node->ip) ? -1 : 1;
675
676 if (cmp != 0)
677 ret = cmp < 0 ? MATCH_LT : MATCH_GT;
678
679 free_srcline(left);
680 free_srcline(right);
681 return ret;
682 }
683
684 static enum match_result match_chain(struct callchain_cursor_node *node,
685 struct callchain_list *cnode)
686 {
687 struct symbol *sym = node->sym;
688 u64 left, right;
689 struct dso *left_dso = NULL;
690 struct dso *right_dso = NULL;
691
692 if (callchain_param.key == CCKEY_SRCLINE) {
693 enum match_result match = match_chain_srcline(node, cnode);
694
695 if (match != MATCH_ERROR)
696 return match;
697 }
698
699 if (cnode->ms.sym && sym && callchain_param.key == CCKEY_FUNCTION) {
700 left = cnode->ms.sym->start;
701 right = sym->start;
702 left_dso = cnode->ms.map->dso;
703 right_dso = node->map->dso;
704 } else {
705 left = cnode->ip;
706 right = node->ip;
707 }
708
709 if (left == right && left_dso == right_dso) {
710 if (node->branch) {
711 cnode->branch_count++;
712
713 if (node->branch_from) {
714 /*
715 * It's "to" of a branch
716 */
717 cnode->brtype_stat.branch_to = true;
718
719 if (node->branch_flags.predicted)
720 cnode->predicted_count++;
721
722 if (node->branch_flags.abort)
723 cnode->abort_count++;
724
725 branch_type_count(&cnode->brtype_stat,
726 &node->branch_flags,
727 node->branch_from,
728 node->ip);
729 } else {
730 /*
731 * It's "from" of a branch
732 */
733 cnode->brtype_stat.branch_to = false;
734 cnode->cycles_count +=
735 node->branch_flags.cycles;
736 cnode->iter_count += node->nr_loop_iter;
737 cnode->iter_cycles += node->iter_cycles;
738 }
739 }
740
741 return MATCH_EQ;
742 }
743
744 return left > right ? MATCH_GT : MATCH_LT;
745 }
746
747 /*
748  * Split the parent in two parts (a new child is created) and
749  * give part of its callchain to the created child.
750  * Then create another child to host the given callchain of the new branch
751 */
752 static int
753 split_add_child(struct callchain_node *parent,
754 struct callchain_cursor *cursor,
755 struct callchain_list *to_split,
756 u64 idx_parents, u64 idx_local, u64 period)
757 {
758 struct callchain_node *new;
759 struct list_head *old_tail;
760 unsigned int idx_total = idx_parents + idx_local;
761
762 /* split */
763 new = create_child(parent, true);
764 if (new == NULL)
765 return -1;
766
767 /* split the callchain and move a part to the new child */
768 old_tail = parent->val.prev;
769 list_del_range(&to_split->list, old_tail);
770 new->val.next = &to_split->list;
771 new->val.prev = old_tail;
772 to_split->list.prev = &new->val;
773 old_tail->next = &new->val;
774
775 /* split the hits */
776 new->hit = parent->hit;
777 new->children_hit = parent->children_hit;
778 parent->children_hit = callchain_cumul_hits(new);
779 new->val_nr = parent->val_nr - idx_local;
780 parent->val_nr = idx_local;
781 new->count = parent->count;
782 new->children_count = parent->children_count;
783 parent->children_count = callchain_cumul_counts(new);
784
785 /* create a new child for the new branch if any */
786 if (idx_total < cursor->nr) {
787 struct callchain_node *first;
788 struct callchain_list *cnode;
789 struct callchain_cursor_node *node;
790 struct rb_node *p, **pp;
791
792 parent->hit = 0;
793 parent->children_hit += period;
794 parent->count = 0;
795 parent->children_count += 1;
796
797 node = callchain_cursor_current(cursor);
798 new = add_child(parent, cursor, period);
799 if (new == NULL)
800 return -1;
801
802 /*
803  * This is the second child, since we moved the parent's children
804  * to the new (first) child above.
805 */
806 p = parent->rb_root_in.rb_node;
807 first = rb_entry(p, struct callchain_node, rb_node_in);
808 cnode = list_first_entry(&first->val, struct callchain_list,
809 list);
810
811 if (match_chain(node, cnode) == MATCH_LT)
812 pp = &p->rb_left;
813 else
814 pp = &p->rb_right;
815
816 rb_link_node(&new->rb_node_in, p, pp);
817 rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
818 } else {
819 parent->hit = period;
820 parent->count = 1;
821 }
822 return 0;
823 }
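
/*
 * Illustrative example (not from the original file): appending the chain
 * a -> b -> d to a node that currently holds a -> b -> c splits that node
 * at the point of divergence:
 *
 *     [a b c]  +  (a b d)   =>      [a b]
 *                                   /    \
 *                                [c]      [d]
 *
 * The original node keeps the shared prefix, its old tail becomes the
 * first child, and the new suffix becomes a second child.
 */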
824
825 static enum match_result
826 append_chain(struct callchain_node *root,
827 struct callchain_cursor *cursor,
828 u64 period);
829
830 static int
831 append_chain_children(struct callchain_node *root,
832 struct callchain_cursor *cursor,
833 u64 period)
834 {
835 struct callchain_node *rnode;
836 struct callchain_cursor_node *node;
837 struct rb_node **p = &root->rb_root_in.rb_node;
838 struct rb_node *parent = NULL;
839
840 node = callchain_cursor_current(cursor);
841 if (!node)
842 return -1;
843
844 /* look up among the children */
845 while (*p) {
846 enum match_result ret;
847
848 parent = *p;
849 rnode = rb_entry(parent, struct callchain_node, rb_node_in);
850
851 /* If at least the first entry matches, descend into this child */
852 ret = append_chain(rnode, cursor, period);
853 if (ret == MATCH_EQ)
854 goto inc_children_hit;
855 if (ret == MATCH_ERROR)
856 return -1;
857
858 if (ret == MATCH_LT)
859 p = &parent->rb_left;
860 else
861 p = &parent->rb_right;
862 }
863 /* nothing in children, add to the current node */
864 rnode = add_child(root, cursor, period);
865 if (rnode == NULL)
866 return -1;
867
868 rb_link_node(&rnode->rb_node_in, parent, p);
869 rb_insert_color(&rnode->rb_node_in, &root->rb_root_in);
870
871 inc_children_hit:
872 root->children_hit += period;
873 root->children_count++;
874 return 0;
875 }
876
877 static enum match_result
878 append_chain(struct callchain_node *root,
879 struct callchain_cursor *cursor,
880 u64 period)
881 {
882 struct callchain_list *cnode;
883 u64 start = cursor->pos;
884 bool found = false;
885 u64 matches;
886 enum match_result cmp = MATCH_ERROR;
887
888 /*
889	 * Look up in the current node.
890	 * If we have a symbol, compare the symbol start so that we match
891	 * anywhere inside a function, unless function
892	 * mode is disabled.
893 */
894 list_for_each_entry(cnode, &root->val, list) {
895 struct callchain_cursor_node *node;
896
897 node = callchain_cursor_current(cursor);
898 if (!node)
899 break;
900
901 cmp = match_chain(node, cnode);
902 if (cmp != MATCH_EQ)
903 break;
904
905 found = true;
906
907 callchain_cursor_advance(cursor);
908 }
909
910 /* no match, relay to the parent */
911 if (!found) {
912 WARN_ONCE(cmp == MATCH_ERROR, "Chain comparison error\n");
913 return cmp;
914 }
915
916 matches = cursor->pos - start;
917
918 /* we match only a part of the node. Split it and add the new chain */
919 if (matches < root->val_nr) {
920 if (split_add_child(root, cursor, cnode, start, matches,
921 period) < 0)
922 return MATCH_ERROR;
923
924 return MATCH_EQ;
925 }
926
927 /* we match 100% of the path, increment the hit */
928 if (matches == root->val_nr && cursor->pos == cursor->nr) {
929 root->hit += period;
930 root->count++;
931 return MATCH_EQ;
932 }
933
934 /* We match the node and still have a part remaining */
935 if (append_chain_children(root, cursor, period) < 0)
936 return MATCH_ERROR;
937
938 return MATCH_EQ;
939 }
940
941 int callchain_append(struct callchain_root *root,
942 struct callchain_cursor *cursor,
943 u64 period)
944 {
945 if (!cursor->nr)
946 return 0;
947
948 callchain_cursor_commit(cursor);
949
950 if (append_chain_children(&root->node, cursor, period) < 0)
951 return -1;
952
953 if (cursor->nr > root->max_depth)
954 root->max_depth = cursor->nr;
955
956 return 0;
957 }
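
/*
 * Minimal usage sketch (illustrative, not part of the original file): a
 * caller typically resets the thread-local cursor, appends the resolved
 * frames and then hands the cursor to callchain_append(). The addresses
 * below are placeholders and map/sym are left unresolved on purpose.
 */
static int __maybe_unused callchain_append_example(struct callchain_root *root,
						   u64 period)
{
	struct callchain_cursor *cursor = &callchain_cursor;

	callchain_cursor_reset(cursor);
	callchain_cursor_append(cursor, 0x400123, NULL, NULL,
				false, NULL, 0, 0, 0);
	callchain_cursor_append(cursor, 0x400456, NULL, NULL,
				false, NULL, 0, 0, 0);

	return callchain_append(root, cursor, period);
}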
958
959 static int
960 merge_chain_branch(struct callchain_cursor *cursor,
961 struct callchain_node *dst, struct callchain_node *src)
962 {
963 struct callchain_cursor_node **old_last = cursor->last;
964 struct callchain_node *child;
965 struct callchain_list *list, *next_list;
966 struct rb_node *n;
967 int old_pos = cursor->nr;
968 int err = 0;
969
970 list_for_each_entry_safe(list, next_list, &src->val, list) {
971 callchain_cursor_append(cursor, list->ip,
972 list->ms.map, list->ms.sym,
973 false, NULL, 0, 0, 0);
974 list_del(&list->list);
975 map__zput(list->ms.map);
976 free(list);
977 }
978
979 if (src->hit) {
980 callchain_cursor_commit(cursor);
981 if (append_chain_children(dst, cursor, src->hit) < 0)
982 return -1;
983 }
984
985 n = rb_first(&src->rb_root_in);
986 while (n) {
987 child = container_of(n, struct callchain_node, rb_node_in);
988 n = rb_next(n);
989 rb_erase(&child->rb_node_in, &src->rb_root_in);
990
991 err = merge_chain_branch(cursor, dst, child);
992 if (err)
993 break;
994
995 free(child);
996 }
997
998 cursor->nr = old_pos;
999 cursor->last = old_last;
1000
1001 return err;
1002 }
1003
1004 int callchain_merge(struct callchain_cursor *cursor,
1005 struct callchain_root *dst, struct callchain_root *src)
1006 {
1007 return merge_chain_branch(cursor, &dst->node, &src->node);
1008 }
1009
1010 int callchain_cursor_append(struct callchain_cursor *cursor,
1011 u64 ip, struct map *map, struct symbol *sym,
1012 bool branch, struct branch_flags *flags,
1013 int nr_loop_iter, u64 iter_cycles, u64 branch_from)
1014 {
1015 struct callchain_cursor_node *node = *cursor->last;
1016
1017 if (!node) {
1018 node = calloc(1, sizeof(*node));
1019 if (!node)
1020 return -ENOMEM;
1021
1022 *cursor->last = node;
1023 }
1024
1025 node->ip = ip;
1026 map__zput(node->map);
1027 node->map = map__get(map);
1028 node->sym = sym;
1029 node->branch = branch;
1030 node->nr_loop_iter = nr_loop_iter;
1031 node->iter_cycles = iter_cycles;
1032
1033 if (flags)
1034 memcpy(&node->branch_flags, flags,
1035 sizeof(struct branch_flags));
1036
1037 node->branch_from = branch_from;
1038 cursor->nr++;
1039
1040 cursor->last = &node->next;
1041
1042 return 0;
1043 }
1044
1045 int sample__resolve_callchain(struct perf_sample *sample,
1046 struct callchain_cursor *cursor, struct symbol **parent,
1047 struct perf_evsel *evsel, struct addr_location *al,
1048 int max_stack)
1049 {
1050 if (sample->callchain == NULL && !symbol_conf.show_branchflag_count)
1051 return 0;
1052
1053 if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
1054 perf_hpp_list.parent || symbol_conf.show_branchflag_count) {
1055 return thread__resolve_callchain(al->thread, cursor, evsel, sample,
1056 parent, al, max_stack);
1057 }
1058 return 0;
1059 }
1060
1061 int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample)
1062 {
1063 if ((!symbol_conf.use_callchain || sample->callchain == NULL) &&
1064 !symbol_conf.show_branchflag_count)
1065 return 0;
1066 return callchain_append(he->callchain, &callchain_cursor, sample->period);
1067 }
1068
1069 int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
1070 bool hide_unresolved)
1071 {
1072 al->map = node->map;
1073 al->sym = node->sym;
1074 if (node->map)
1075 al->addr = node->map->map_ip(node->map, node->ip);
1076 else
1077 al->addr = node->ip;
1078
1079 if (al->sym == NULL) {
1080 if (hide_unresolved)
1081 return 0;
1082 if (al->map == NULL)
1083 goto out;
1084 }
1085
1086 if (al->map->groups == &al->machine->kmaps) {
1087 if (machine__is_host(al->machine)) {
1088 al->cpumode = PERF_RECORD_MISC_KERNEL;
1089 al->level = 'k';
1090 } else {
1091 al->cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
1092 al->level = 'g';
1093 }
1094 } else {
1095 if (machine__is_host(al->machine)) {
1096 al->cpumode = PERF_RECORD_MISC_USER;
1097 al->level = '.';
1098 } else if (perf_guest) {
1099 al->cpumode = PERF_RECORD_MISC_GUEST_USER;
1100 al->level = 'u';
1101 } else {
1102 al->cpumode = PERF_RECORD_MISC_HYPERVISOR;
1103 al->level = 'H';
1104 }
1105 }
1106
1107 out:
1108 return 1;
1109 }
1110
1111 char *callchain_list__sym_name(struct callchain_list *cl,
1112 char *bf, size_t bfsize, bool show_dso)
1113 {
1114 bool show_addr = callchain_param.key == CCKEY_ADDRESS;
1115 bool show_srcline = show_addr || callchain_param.key == CCKEY_SRCLINE;
1116 int printed;
1117
1118 if (cl->ms.sym) {
1119 if (show_srcline && cl->ms.map && !cl->srcline)
1120 cl->srcline = get_srcline(cl->ms.map->dso,
1121 map__rip_2objdump(cl->ms.map,
1122 cl->ip),
1123 cl->ms.sym, false, show_addr);
1124 if (cl->srcline)
1125 printed = scnprintf(bf, bfsize, "%s %s",
1126 cl->ms.sym->name, cl->srcline);
1127 else
1128 printed = scnprintf(bf, bfsize, "%s", cl->ms.sym->name);
1129 } else
1130 printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);
1131
1132 if (show_dso)
1133 scnprintf(bf + printed, bfsize - printed, " %s",
1134 cl->ms.map ?
1135 cl->ms.map->dso->short_name :
1136 "unknown");
1137
1138 return bf;
1139 }
1140
1141 char *callchain_node__scnprintf_value(struct callchain_node *node,
1142 char *bf, size_t bfsize, u64 total)
1143 {
1144 double percent = 0.0;
1145 u64 period = callchain_cumul_hits(node);
1146 unsigned count = callchain_cumul_counts(node);
1147
1148 if (callchain_param.mode == CHAIN_FOLDED) {
1149 period = node->hit;
1150 count = node->count;
1151 }
1152
1153 switch (callchain_param.value) {
1154 case CCVAL_PERIOD:
1155 scnprintf(bf, bfsize, "%"PRIu64, period);
1156 break;
1157 case CCVAL_COUNT:
1158 scnprintf(bf, bfsize, "%u", count);
1159 break;
1160 case CCVAL_PERCENT:
1161 default:
1162 if (total)
1163 percent = period * 100.0 / total;
1164 scnprintf(bf, bfsize, "%.2f%%", percent);
1165 break;
1166 }
1167 return bf;
1168 }
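
/*
 * Example outputs (illustrative): with a cumulated period of 1500 out of a
 * total of 30000, CCVAL_PERCENT prints "5.00%", CCVAL_PERIOD prints "1500"
 * and CCVAL_COUNT prints the cumulated occurrence count.
 */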
1169
1170 int callchain_node__fprintf_value(struct callchain_node *node,
1171 FILE *fp, u64 total)
1172 {
1173 double percent = 0.0;
1174 u64 period = callchain_cumul_hits(node);
1175 unsigned count = callchain_cumul_counts(node);
1176
1177 if (callchain_param.mode == CHAIN_FOLDED) {
1178 period = node->hit;
1179 count = node->count;
1180 }
1181
1182 switch (callchain_param.value) {
1183 case CCVAL_PERIOD:
1184 return fprintf(fp, "%"PRIu64, period);
1185 case CCVAL_COUNT:
1186 return fprintf(fp, "%u", count);
1187 case CCVAL_PERCENT:
1188 default:
1189 if (total)
1190 percent = period * 100.0 / total;
1191 return percent_color_fprintf(fp, "%.2f%%", percent);
1192 }
1193 return 0;
1194 }
1195
1196 static void callchain_counts_value(struct callchain_node *node,
1197 u64 *branch_count, u64 *predicted_count,
1198 u64 *abort_count, u64 *cycles_count)
1199 {
1200 struct callchain_list *clist;
1201
1202 list_for_each_entry(clist, &node->val, list) {
1203 if (branch_count)
1204 *branch_count += clist->branch_count;
1205
1206 if (predicted_count)
1207 *predicted_count += clist->predicted_count;
1208
1209 if (abort_count)
1210 *abort_count += clist->abort_count;
1211
1212 if (cycles_count)
1213 *cycles_count += clist->cycles_count;
1214 }
1215 }
1216
1217 static int callchain_node_branch_counts_cumul(struct callchain_node *node,
1218 u64 *branch_count,
1219 u64 *predicted_count,
1220 u64 *abort_count,
1221 u64 *cycles_count)
1222 {
1223 struct callchain_node *child;
1224 struct rb_node *n;
1225
1226 n = rb_first(&node->rb_root_in);
1227 while (n) {
1228 child = rb_entry(n, struct callchain_node, rb_node_in);
1229 n = rb_next(n);
1230
1231 callchain_node_branch_counts_cumul(child, branch_count,
1232 predicted_count,
1233 abort_count,
1234 cycles_count);
1235
1236 callchain_counts_value(child, branch_count,
1237 predicted_count, abort_count,
1238 cycles_count);
1239 }
1240
1241 return 0;
1242 }
1243
1244 int callchain_branch_counts(struct callchain_root *root,
1245 u64 *branch_count, u64 *predicted_count,
1246 u64 *abort_count, u64 *cycles_count)
1247 {
1248 if (branch_count)
1249 *branch_count = 0;
1250
1251 if (predicted_count)
1252 *predicted_count = 0;
1253
1254 if (abort_count)
1255 *abort_count = 0;
1256
1257 if (cycles_count)
1258 *cycles_count = 0;
1259
1260 return callchain_node_branch_counts_cumul(&root->node,
1261 branch_count,
1262 predicted_count,
1263 abort_count,
1264 cycles_count);
1265 }
1266
1267 static int count_pri64_printf(int idx, const char *str, u64 value, char *bf, int bfsize)
1268 {
1269 int printed;
1270
1271 printed = scnprintf(bf, bfsize, "%s%s:%" PRId64 "", (idx) ? " " : " (", str, value);
1272
1273 return printed;
1274 }
1275
1276 static int count_float_printf(int idx, const char *str, float value,
1277 char *bf, int bfsize, float threshold)
1278 {
1279 int printed;
1280
1281 if (threshold != 0.0 && value < threshold)
1282 return 0;
1283
1284 printed = scnprintf(bf, bfsize, "%s%s:%.1f%%", (idx) ? " " : " (", str, value);
1285
1286 return printed;
1287 }
1288
1289 static int branch_to_str(char *bf, int bfsize,
1290 u64 branch_count, u64 predicted_count,
1291 u64 abort_count,
1292 struct branch_type_stat *brtype_stat)
1293 {
1294 int printed, i = 0;
1295
1296 printed = branch_type_str(brtype_stat, bf, bfsize);
1297 if (printed)
1298 i++;
1299
1300 if (predicted_count < branch_count) {
1301 printed += count_float_printf(i++, "predicted",
1302 predicted_count * 100.0 / branch_count,
1303 bf + printed, bfsize - printed, 0.0);
1304 }
1305
1306 if (abort_count) {
1307 printed += count_float_printf(i++, "abort",
1308 abort_count * 100.0 / branch_count,
1309 bf + printed, bfsize - printed, 0.1);
1310 }
1311
1312 if (i)
1313 printed += scnprintf(bf + printed, bfsize - printed, ")");
1314
1315 return printed;
1316 }
1317
1318 static int branch_from_str(char *bf, int bfsize,
1319 u64 branch_count,
1320 u64 cycles_count, u64 iter_count,
1321 u64 iter_cycles)
1322 {
1323 int printed = 0, i = 0;
1324 u64 cycles;
1325
1326 cycles = cycles_count / branch_count;
1327 if (cycles) {
1328 printed += count_pri64_printf(i++, "cycles",
1329 cycles,
1330 bf + printed, bfsize - printed);
1331 }
1332
1333 if (iter_count) {
1334 printed += count_pri64_printf(i++, "iter",
1335 iter_count,
1336 bf + printed, bfsize - printed);
1337
1338 printed += count_pri64_printf(i++, "avg_cycles",
1339 iter_cycles / iter_count,
1340 bf + printed, bfsize - printed);
1341 }
1342
1343 if (i)
1344 printed += scnprintf(bf + printed, bfsize - printed, ")");
1345
1346 return printed;
1347 }
1348
1349 static int counts_str_build(char *bf, int bfsize,
1350 u64 branch_count, u64 predicted_count,
1351 u64 abort_count, u64 cycles_count,
1352 u64 iter_count, u64 iter_cycles,
1353 struct branch_type_stat *brtype_stat)
1354 {
1355 int printed;
1356
1357 if (branch_count == 0)
1358 return scnprintf(bf, bfsize, " (calltrace)");
1359
1360 if (brtype_stat->branch_to) {
1361 printed = branch_to_str(bf, bfsize, branch_count,
1362 predicted_count, abort_count, brtype_stat);
1363 } else {
1364 printed = branch_from_str(bf, bfsize, branch_count,
1365 cycles_count, iter_count, iter_cycles);
1366 }
1367
1368 if (!printed)
1369 bf[0] = 0;
1370
1371 return printed;
1372 }
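
/*
 * Example output fragments (illustrative, assuming branch_type_str() itself
 * prints nothing): " (calltrace)" when no branch samples were recorded,
 * " (cycles:12 iter:100 avg_cycles:3)" for the "from" side of a branch and
 * " (predicted:49.0% abort:0.5%)" for the "to" side.
 */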
1373
1374 static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
1375 u64 branch_count, u64 predicted_count,
1376 u64 abort_count, u64 cycles_count,
1377 u64 iter_count, u64 iter_cycles,
1378 struct branch_type_stat *brtype_stat)
1379 {
1380 char str[256];
1381
1382 counts_str_build(str, sizeof(str), branch_count,
1383 predicted_count, abort_count, cycles_count,
1384 iter_count, iter_cycles, brtype_stat);
1385
1386 if (fp)
1387 return fprintf(fp, "%s", str);
1388
1389 return scnprintf(bf, bfsize, "%s", str);
1390 }
1391
1392 int callchain_list_counts__printf_value(struct callchain_list *clist,
1393 FILE *fp, char *bf, int bfsize)
1394 {
1395 u64 branch_count, predicted_count;
1396 u64 abort_count, cycles_count;
1397 u64 iter_count, iter_cycles;
1398
1399 branch_count = clist->branch_count;
1400 predicted_count = clist->predicted_count;
1401 abort_count = clist->abort_count;
1402 cycles_count = clist->cycles_count;
1403 iter_count = clist->iter_count;
1404 iter_cycles = clist->iter_cycles;
1405
1406 return callchain_counts_printf(fp, bf, bfsize, branch_count,
1407 predicted_count, abort_count,
1408 cycles_count, iter_count, iter_cycles,
1409 &clist->brtype_stat);
1410 }
1411
1412 static void free_callchain_node(struct callchain_node *node)
1413 {
1414 struct callchain_list *list, *tmp;
1415 struct callchain_node *child;
1416 struct rb_node *n;
1417
1418 list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
1419 list_del(&list->list);
1420 map__zput(list->ms.map);
1421 free(list);
1422 }
1423
1424 list_for_each_entry_safe(list, tmp, &node->val, list) {
1425 list_del(&list->list);
1426 map__zput(list->ms.map);
1427 free(list);
1428 }
1429
1430 n = rb_first(&node->rb_root_in);
1431 while (n) {
1432 child = container_of(n, struct callchain_node, rb_node_in);
1433 n = rb_next(n);
1434 rb_erase(&child->rb_node_in, &node->rb_root_in);
1435
1436 free_callchain_node(child);
1437 free(child);
1438 }
1439 }
1440
1441 void free_callchain(struct callchain_root *root)
1442 {
1443 if (!symbol_conf.use_callchain)
1444 return;
1445
1446 free_callchain_node(&root->node);
1447 }
1448
1449 static u64 decay_callchain_node(struct callchain_node *node)
1450 {
1451 struct callchain_node *child;
1452 struct rb_node *n;
1453 u64 child_hits = 0;
1454
1455 n = rb_first(&node->rb_root_in);
1456 while (n) {
1457 child = container_of(n, struct callchain_node, rb_node_in);
1458
1459 child_hits += decay_callchain_node(child);
1460 n = rb_next(n);
1461 }
1462
1463 node->hit = (node->hit * 7) / 8;
1464 node->children_hit = child_hits;
1465
1466 return node->hit;
1467 }
1468
1469 void decay_callchain(struct callchain_root *root)
1470 {
1471 if (!symbol_conf.use_callchain)
1472 return;
1473
1474 decay_callchain_node(&root->node);
1475 }
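
/*
 * Worked example (illustrative): with hit = 800, successive decays give
 * 800 -> 700 -> 612 -> 535 -> ..., i.e. each pass keeps 7/8 of the previous
 * value (integer division), so stale entries fade out over time.
 */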
1476
1477 int callchain_node__make_parent_list(struct callchain_node *node)
1478 {
1479 struct callchain_node *parent = node->parent;
1480 struct callchain_list *chain, *new;
1481 LIST_HEAD(head);
1482
1483 while (parent) {
1484 list_for_each_entry_reverse(chain, &parent->val, list) {
1485 new = malloc(sizeof(*new));
1486 if (new == NULL)
1487 goto out;
1488 *new = *chain;
1489 new->has_children = false;
1490 map__get(new->ms.map);
1491 list_add_tail(&new->list, &head);
1492 }
1493 parent = parent->parent;
1494 }
1495
1496 list_for_each_entry_safe_reverse(chain, new, &head, list)
1497 list_move_tail(&chain->list, &node->parent_val);
1498
1499 if (!list_empty(&node->parent_val)) {
1500 chain = list_first_entry(&node->parent_val, struct callchain_list, list);
1501 chain->has_children = rb_prev(&node->rb_node) || rb_next(&node->rb_node);
1502
1503 chain = list_first_entry(&node->val, struct callchain_list, list);
1504 chain->has_children = false;
1505 }
1506 return 0;
1507
1508 out:
1509 list_for_each_entry_safe(chain, new, &head, list) {
1510 list_del(&chain->list);
1511 map__zput(chain->ms.map);
1512 free(chain);
1513 }
1514 return -ENOMEM;
1515 }
1516
1517 int callchain_cursor__copy(struct callchain_cursor *dst,
1518 struct callchain_cursor *src)
1519 {
1520 int rc = 0;
1521
1522 callchain_cursor_reset(dst);
1523 callchain_cursor_commit(src);
1524
1525 while (true) {
1526 struct callchain_cursor_node *node;
1527
1528 node = callchain_cursor_current(src);
1529 if (node == NULL)
1530 break;
1531
1532 rc = callchain_cursor_append(dst, node->ip, node->map, node->sym,
1533 node->branch, &node->branch_flags,
1534 node->nr_loop_iter,
1535 node->iter_cycles,
1536 node->branch_from);
1537 if (rc)
1538 break;
1539
1540 callchain_cursor_advance(src);
1541 }
1542
1543 return rc;
1544 }