// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <linux/string.h>

#include "../../util/util.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
#include "../../util/srcline.h"
#include "../../util/string2.h"
#include "../../util/thread.h"
#include "../../util/sane_ctype.h"

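/* Print the blank left margin that callchain lines are indented by. */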
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, " ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

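/*
 * Print the chain of functions inlined at @ip, one per line, indented to
 * match the surrounding callchain graph.  Returns the number of bytes
 * written.
 */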
static size_t inline__fprintf(struct map *map, u64 ip, int left_margin,
			      int depth, int depth_mask, FILE *fp)
{
	struct dso *dso;
	struct inline_node *node;
	struct inline_list *ilist;
	int ret = 0, i;

	if (map == NULL)
		return 0;

	dso = map->dso;
	if (dso == NULL)
		return 0;

	node = dso__parse_addr_inlines(dso,
				       map__rip_2objdump(map, ip));
	if (node == NULL)
		return 0;

	list_for_each_entry(ilist, &node->val, list) {
		if ((ilist->filename != NULL) || (ilist->funcname != NULL)) {
			ret += callchain__fprintf_left_margin(fp, left_margin);

			for (i = 0; i < depth; i++) {
				if (depth_mask & (1 << i))
					ret += fprintf(fp, "|");
				else
					ret += fprintf(fp, " ");
				ret += fprintf(fp, " ");
			}

			if (callchain_param.key == CCKEY_ADDRESS ||
			    callchain_param.key == CCKEY_SRCLINE) {
				if (ilist->filename != NULL)
					ret += fprintf(fp, "%s:%d (inline)",
						       ilist->filename,
						       ilist->line_nr);
				else
					ret += fprintf(fp, "??");
			} else if (ilist->funcname != NULL)
				ret += fprintf(fp, "%s (inline)",
					       ilist->funcname);
			else if (ilist->filename != NULL)
				ret += fprintf(fp, "%s:%d (inline)",
					       ilist->filename,
					       ilist->line_nr);
			else
				ret += fprintf(fp, "??");

			ret += fprintf(fp, "\n");
		}
	}

	inline_node__delete(node);
	return ret;
}

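/* Print one spacer line carrying only the '|' connectors for active depths. */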
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}

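/*
 * Print a single callchain entry: the '|' connectors for each depth, the
 * "--value--" marker on the first entry of a new branch, and the symbol
 * name (plus branch-flag counts and inlined functions when enabled).
 */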
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024], *alloc_str = NULL;
	char buf[64];
	const char *str;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", " ");
	}

	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);

	if (symbol_conf.show_branchflag_count) {
		callchain_list_counts__printf_value(chain, NULL,
						    buf, sizeof(buf));

		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
			str = "Not enough memory!";
		else
			str = alloc_str;
	}

	fputs(str, fp);
	fputc('\n', fp);
	free(alloc_str);

	if (symbol_conf.inline_name)
		ret += inline__fprintf(chain->ms.map, chain->ip,
				       left_margin, depth, depth_mask, fp);
	return ret;
}

static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

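/* Set up the synthetic "[...]" symbol used to report remaining (filtered) hits. */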
static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

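/*
 * Recursively print a callchain rbtree in graph mode, maintaining the depth
 * mask so connector lines are dropped after the last child of each level.
 */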
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}

/*
 * If we have a single callchain root, don't bother printing
 * its percentage (100 % in fractal mode and the same percentage
 * as the hist in graph mode). This also avoids one level of column.
 *
 * However, when a percent limit is applied, it's possible that the single
 * callchain node has a different (non-100% in fractal mode) percentage.
 */
static bool need_percent_display(struct rb_node *node, u64 parent_samples)
{
	struct callchain_node *cnode;

	if (rb_next(node))
		return true;

	cnode = rb_entry(node, struct callchain_node, rb_node);
	return callchain_cumul_hits(cnode) != parent_samples;
}

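/*
 * Top-level graph printer: handles the single-root case (no percentage
 * column) inline and then recurses via __callchain__fprintf_graph().
 */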
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same as
			 * the symbol. No need to print it; otherwise it appears
			 * to be displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && strstarts(sort_order, "sym"))
				continue;

			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;

			if (symbol_conf.inline_name)
				ret += inline__fprintf(chain->ms.map,
						       chain->ip,
						       left_margin,
						       0, 0,
						       fp);
		}
		root = &cnode->rb_root;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}

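/* Print one callchain from its root down to @node, one symbol per line. */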
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];

	if (!node)
		return 0;

	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);

	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
					bf, sizeof(bf), false));
	}

	return ret;
}

static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += fprintf(fp, " ");
		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, "\n");
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

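/* Print one callchain on a single line, entries joined by the field separator. */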
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
	const char *sep = symbol_conf.field_sep ?: ";";
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];
	bool first;

	if (!node)
		return 0;

	ret += __callchain__fprintf_folded(fp, node->parent);

	first = (ret == 0);
	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "%s%s", first ? "" : sep,
			       callchain_list__sym_name(chain,
						bf, sizeof(bf), false));
		first = false;
	}

	return ret;
}

static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
					u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, " ");
		ret += __callchain__fprintf_folded(fp, chain);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

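/* Dispatch to the graph, flat or folded printer based on callchain_param.mode. */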
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	u64 parent_samples = he->stat.period;

	if (symbol_conf.cumulate_callchain)
		parent_samples = he->stat_acc->period;

	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
		break;
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
		break;
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_FOLDED:
		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}

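/* Format one hist entry into hpp->buf according to the given format list. */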
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp_list__for_each_format(hpp_list, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}

static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}

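/*
 * Print one entry in hierarchy mode: indent by depth, print the overhead
 * columns, then the entry's own (left-aligned) sort column and callchain.
 */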
static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
					 struct perf_hpp *hpp,
					 struct hists *hists,
					 FILE *fp)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int ret, printed = 0;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	if (!sep)
		ret = scnprintf(hpp->buf, hpp->size, "%*s",
				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	printed += fprintf(fp, "%s", buf);

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		hpp->buf = buf;
		hpp->size = size;

		/*
		 * No need to call hist_entry__snprintf_alignment() since this
		 * fmt is always the last column in the hierarchy mode.
		 */
		if (perf_hpp__use_color() && fmt->color)
			fmt->color(fmt, hpp, he);
		else
			fmt->entry(fmt, hpp, he);

		/*
		 * dynamic entries are right-aligned but we want left-aligned
		 * in the hierarchy mode
		 */
		printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
	}
	printed += putc('\n', fp);

	if (symbol_conf.use_callchain && he->leaf) {
		u64 total = hists__total_period(hists);

		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
		goto out;
	}

out:
	return printed;
}

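/*
 * Print one hist entry line (or its hierarchy form), followed by its
 * callchain and/or inlined functions when those are enabled.
 */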
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       char *bf, size_t bfsz, FILE *fp,
			       bool use_callchain)
{
	int ret;
	int callchain_ret = 0;
	int inline_ret = 0;
	struct perf_hpp hpp = {
		.buf  = bf,
		.size = size,
	};
	struct hists *hists = he->hists;
	u64 total_period = hists->stats.total_period;

	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (use_callchain)
		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
							      0, fp);

	if (callchain_ret == 0 && symbol_conf.inline_name) {
		inline_ret = inline__fprintf(he->ms.map, he->ip, 0, 0, 0, fp);
		ret += inline_ret;
		if (inline_ret > 0)
			ret += fprintf(fp, "\n");
	} else
		ret += callchain_ret;

	return ret;
}

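/* Print (indent - 2) levels of the padding string (spaces or dots), unless a field separator is in use. */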
static int print_hierarchy_indent(const char *sep, int indent,
				  const char *line, FILE *fp)
{
	if (sep != NULL || indent < 2)
		return 0;

	return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
}

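/*
 * Print the hierarchy-mode headers: the overhead columns followed by the
 * sort keys joined with " / ", then a line of dots.  Returns the number of
 * header lines written.
 */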
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", trim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++;	/* for '+' sign between column headers */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}

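/* Print one line of column headers, honouring multi-column header spans. */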
static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
			 int line, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int span = 0;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first && !span)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		fmt->header(fmt, hpp, hists, line, &span);

		if (!span)
			fprintf(fp, "%s", hpp->buf);
	}
}

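/*
 * Print the standard (non-hierarchy) headers: one or more lines of column
 * names and, unless a field separator is set, a line of dots underneath.
 */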
static int
hists__fprintf_standard_headers(struct hists *hists,
				struct perf_hpp *hpp,
				FILE *fp)
{
	struct perf_hpp_list *hpp_list = hists->hpp_list;
	struct perf_hpp_fmt *fmt;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int line;

	for (line = 0; line < hpp_list->nr_header_lines; line++) {
		/* first # is displayed one level up */
		if (line)
			fprintf(fp, "# ");
		fprintf_line(hists, hpp, line, fp);
		fprintf(fp, "\n");
	}

	if (sep)
		return hpp_list->nr_header_lines;

	first = true;

	fprintf(fp, "# ");

	hists__for_each_format(hists, fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		width = fmt->width(fmt, hpp, hists);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	fprintf(fp, "#\n");
	return hpp_list->nr_header_lines + 2;
}

int hists__fprintf_headers(struct hists *hists, FILE *fp)
{
	char bf[1024];
	struct perf_hpp dummy_hpp = {
		.buf = bf,
		.size = sizeof(bf),
	};

	fprintf(fp, "# ");

	if (symbol_conf.report_hierarchy)
		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
	else
		return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
}

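/*
 * Print all hist entries to fp: optional headers first, then each entry that
 * survives filtering and the min_pcnt limit, up to max_rows lines.
 */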
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool use_callchain)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
						   MAP__FUNCTION, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}

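/* Print a count of each PERF_RECORD_* event type seen, skipping empty ones. */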
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (stats->nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       stats->nr_events[i]);
	}

	return ret;
}