]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - tools/perf/ui/stdio/hist.c
perf report: Show branch info in callchain entry for stdio mode
[mirror_ubuntu-artful-kernel.git] / tools / perf / ui / stdio / hist.c
1 #include <stdio.h>
2
3 #include "../../util/util.h"
4 #include "../../util/hist.h"
5 #include "../../util/sort.h"
6 #include "../../util/evsel.h"
7
8
/*
 * Print the left margin that precedes every callchain graph line:
 * one leading space plus @left_margin further spaces.
 *
 * Returns the number of characters written.
 */
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	/* Treat a negative margin as zero, like the loop form would. */
	int pad = left_margin > 0 ? left_margin : 0;

	/* "%*s" with an empty string emits exactly @pad + 1 spaces. */
	return fprintf(fp, "%*s", pad + 1, "");
}
19
/*
 * Print one connector row of the callchain graph: for every level up
 * to @depth, draw "| " when that level's bit is set in @depth_mask,
 * a blank cell otherwise, then end the row with a newline.
 *
 * Returns the number of characters written.
 */
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	size_t printed = callchain__fprintf_left_margin(fp, left_margin);
	int level;

	for (level = 0; level < depth; level++)
		printed += fprintf(fp, "%s",
				   (depth_mask & (1 << level)) ? "| " : " ");

	return printed + fprintf(fp, "\n");
}
36
/*
 * Print a single callchain graph entry: the connector columns for
 * @depth levels, an optional "--<value>--" opener on the last column
 * when this is the first entry of a child (@period == 0), then the
 * resolved symbol name.
 *
 * NOTE(review): the return value only counts the margin/connector
 * output; the symbol name and newline written via fputs()/fputc() are
 * not included — confirm callers don't rely on an exact width.
 */
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024], *alloc_str = NULL;
	char buf[64];
	const char *str;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			/* First entry of a child: open with its value. */
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", " ");
	}

	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);

	if (symbol_conf.show_branchflag_count) {
		/*
		 * Append the branch flag counts to the symbol name; only
		 * the child's first entry (@period == 0) carries the node.
		 */
		if (!period)
			callchain_list_counts__printf_value(node, chain, NULL,
							    buf, sizeof(buf));
		else
			callchain_list_counts__printf_value(NULL, chain, NULL,
							    buf, sizeof(buf));

		/* On asprintf() failure fall back to a static message. */
		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
			str = "Not enough memory!";
		else
			str = alloc_str;
	}

	fputs(str, fp);
	fputc('\n', fp);
	free(alloc_str);
	return ret;
}
83
/*
 * Placeholder symbol/entry used to display hits filtered out below the
 * percent limit as a trailing "[...]" line in the callchain graph.
 */
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

/*
 * Allocate the "[...]" placeholder symbol and hook it into rem_hits.
 * On allocation failure rem_sq_bracket stays NULL and the remainder
 * line is simply not printed (see __callchain__fprintf_graph()).
 */
static void init_rem_hits(void)
{
	/*
	 * +6 bytes hold "[...]" plus its NUL terminator; assumes
	 * struct symbol ends in a flexible name array that the
	 * strcpy() below fills — TODO confirm against struct symbol.
	 */
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}
98
/*
 * Recursively print one level of the callchain graph rooted at @root.
 *
 * @depth is the current nesting level (1-based); @depth_mask has one
 * bit per level telling whether a '|' connector must still be drawn in
 * that column.  In CHAIN_GRAPH_REL mode, children filtered out below
 * the percent limit are summed up and shown as a trailing "[...]"
 * (rem_hits) entry.  Returns the number of characters printed.
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child.
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		/* i == 0 marks the child's first entry (prints its value). */
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		/* Relative mode scales children against this child's hits. */
		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		/* "[...]" placeholder never got allocated: nothing to show. */
		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}
187
/*
 * If there is a single callchain root, don't bother printing
 * its percentage (100% in fractal mode, or the same percentage
 * as the hist entry in graph mode). This also avoids one level of column.
 *
 * However, when a percent limit is applied, it's possible that a single
 * callchain node has a different (non-100% in fractal mode) percentage.
 */
196 static bool need_percent_display(struct rb_node *node, u64 parent_samples)
197 {
198 struct callchain_node *cnode;
199
200 if (rb_next(node))
201 return true;
202
203 cnode = rb_entry(node, struct callchain_node, rb_node);
204 return callchain_cumul_hits(cnode) != parent_samples;
205 }
206
/*
 * Print the callchain graph for one hist entry.  When the single root
 * needs no percentage of its own (see need_percent_display()), its
 * entries are printed inline after a "---" opener and the recursive
 * graph starts at its children instead.  Returns characters printed.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same
			 * as the symbol. No need to print it, otherwise it
			 * appears displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && !prefixcmp(sort_order, "sym"))
				continue;
			if (!printed) {
				/* Open the inline chain with "|" and "---". */
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						NULL, chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		/* Descend past the elided root. */
		root = &cnode->rb_root;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}
270
271 static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
272 u64 total_samples)
273 {
274 struct callchain_list *chain;
275 size_t ret = 0;
276 char bf[1024];
277
278 if (!node)
279 return 0;
280
281 ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
282
283
284 list_for_each_entry(chain, &node->val, list) {
285 if (chain->ip >= PERF_CONTEXT_MAX)
286 continue;
287 ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
288 bf, sizeof(bf), false));
289 }
290
291 return ret;
292 }
293
294 static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
295 u64 total_samples)
296 {
297 size_t ret = 0;
298 u32 entries_printed = 0;
299 struct callchain_node *chain;
300 struct rb_node *rb_node = rb_first(tree);
301
302 while (rb_node) {
303 chain = rb_entry(rb_node, struct callchain_node, rb_node);
304
305 ret += fprintf(fp, " ");
306 ret += callchain_node__fprintf_value(chain, fp, total_samples);
307 ret += fprintf(fp, "\n");
308 ret += __callchain__fprintf_flat(fp, chain, total_samples);
309 ret += fprintf(fp, "\n");
310 if (++entries_printed == callchain_param.print_limit)
311 break;
312
313 rb_node = rb_next(rb_node);
314 }
315
316 return ret;
317 }
318
319 static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
320 {
321 const char *sep = symbol_conf.field_sep ?: ";";
322 struct callchain_list *chain;
323 size_t ret = 0;
324 char bf[1024];
325 bool first;
326
327 if (!node)
328 return 0;
329
330 ret += __callchain__fprintf_folded(fp, node->parent);
331
332 first = (ret == 0);
333 list_for_each_entry(chain, &node->val, list) {
334 if (chain->ip >= PERF_CONTEXT_MAX)
335 continue;
336 ret += fprintf(fp, "%s%s", first ? "" : sep,
337 callchain_list__sym_name(chain,
338 bf, sizeof(bf), false));
339 first = false;
340 }
341
342 return ret;
343 }
344
345 static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
346 u64 total_samples)
347 {
348 size_t ret = 0;
349 u32 entries_printed = 0;
350 struct callchain_node *chain;
351 struct rb_node *rb_node = rb_first(tree);
352
353 while (rb_node) {
354
355 chain = rb_entry(rb_node, struct callchain_node, rb_node);
356
357 ret += callchain_node__fprintf_value(chain, fp, total_samples);
358 ret += fprintf(fp, " ");
359 ret += __callchain__fprintf_folded(fp, chain);
360 ret += fprintf(fp, "\n");
361 if (++entries_printed == callchain_param.print_limit)
362 break;
363
364 rb_node = rb_next(rb_node);
365 }
366
367 return ret;
368 }
369
370 static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
371 u64 total_samples, int left_margin,
372 FILE *fp)
373 {
374 u64 parent_samples = he->stat.period;
375
376 if (symbol_conf.cumulate_callchain)
377 parent_samples = he->stat_acc->period;
378
379 switch (callchain_param.mode) {
380 case CHAIN_GRAPH_REL:
381 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
382 parent_samples, left_margin);
383 break;
384 case CHAIN_GRAPH_ABS:
385 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
386 parent_samples, left_margin);
387 break;
388 case CHAIN_FLAT:
389 return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
390 break;
391 case CHAIN_FOLDED:
392 return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
393 break;
394 case CHAIN_NONE:
395 break;
396 default:
397 pr_err("Bad callchain mode\n");
398 }
399
400 return 0;
401 }
402
/*
 * Format one hist entry into hpp->buf using the columns of @hpp_list,
 * separated by symbol_conf.field_sep (or a space when unset).
 *
 * Returns the number of characters written into the buffer, or 0 when
 * "other" entries are excluded and @he has no parent.
 */
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp_list__for_each_format(hpp_list, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		/* Colored output only on a color-capable terminal. */
		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		/* Pad/align the column, then move past it in the buffer. */
		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}
440
/* Format @he using its hists' default column list. */
static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}
445
446 static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
447 struct perf_hpp *hpp,
448 struct hists *hists,
449 FILE *fp)
450 {
451 const char *sep = symbol_conf.field_sep;
452 struct perf_hpp_fmt *fmt;
453 struct perf_hpp_list_node *fmt_node;
454 char *buf = hpp->buf;
455 size_t size = hpp->size;
456 int ret, printed = 0;
457 bool first = true;
458
459 if (symbol_conf.exclude_other && !he->parent)
460 return 0;
461
462 ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
463 advance_hpp(hpp, ret);
464
465 /* the first hpp_list_node is for overhead columns */
466 fmt_node = list_first_entry(&hists->hpp_formats,
467 struct perf_hpp_list_node, list);
468 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
469 /*
470 * If there's no field_sep, we still need
471 * to display initial ' '.
472 */
473 if (!sep || !first) {
474 ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
475 advance_hpp(hpp, ret);
476 } else
477 first = false;
478
479 if (perf_hpp__use_color() && fmt->color)
480 ret = fmt->color(fmt, hpp, he);
481 else
482 ret = fmt->entry(fmt, hpp, he);
483
484 ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
485 advance_hpp(hpp, ret);
486 }
487
488 if (!sep)
489 ret = scnprintf(hpp->buf, hpp->size, "%*s",
490 (hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
491 advance_hpp(hpp, ret);
492
493 printed += fprintf(fp, "%s", buf);
494
495 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
496 hpp->buf = buf;
497 hpp->size = size;
498
499 /*
500 * No need to call hist_entry__snprintf_alignment() since this
501 * fmt is always the last column in the hierarchy mode.
502 */
503 if (perf_hpp__use_color() && fmt->color)
504 fmt->color(fmt, hpp, he);
505 else
506 fmt->entry(fmt, hpp, he);
507
508 /*
509 * dynamic entries are right-aligned but we want left-aligned
510 * in the hierarchy mode
511 */
512 printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
513 }
514 printed += putc('\n', fp);
515
516 if (symbol_conf.use_callchain && he->leaf) {
517 u64 total = hists__total_period(hists);
518
519 printed += hist_entry_callchain__fprintf(he, total, 0, fp);
520 goto out;
521 }
522
523 out:
524 return printed;
525 }
526
527 static int hist_entry__fprintf(struct hist_entry *he, size_t size,
528 char *bf, size_t bfsz, FILE *fp,
529 bool use_callchain)
530 {
531 int ret;
532 struct perf_hpp hpp = {
533 .buf = bf,
534 .size = size,
535 };
536 struct hists *hists = he->hists;
537 u64 total_period = hists->stats.total_period;
538
539 if (size == 0 || size > bfsz)
540 size = hpp.size = bfsz;
541
542 if (symbol_conf.report_hierarchy)
543 return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);
544
545 hist_entry__snprintf(he, &hpp);
546
547 ret = fprintf(fp, "%s\n", bf);
548
549 if (use_callchain)
550 ret += hist_entry_callchain__fprintf(he, total_period, 0, fp);
551
552 return ret;
553 }
554
555 static int print_hierarchy_indent(const char *sep, int indent,
556 const char *line, FILE *fp)
557 {
558 if (sep != NULL || indent < 2)
559 return 0;
560
561 return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
562 }
563
/*
 * Print the header block for hierarchy mode: the overhead column
 * titles followed by the sort keys joined with " / ", then a matching
 * underline of dots sized to the widest header.  Returns the number of
 * header lines printed (always 2).
 */
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", trim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	/* Underline each overhead column with dots of its own width. */
	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	/*
	 * Re-measure the widest sort-key level so the trailing dots line
	 * covers the longest header row.
	 */
	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++; /* for '+' sign between column header */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

	fprintf(fp, "\n#\n");

	/* Two lines were printed: headers and the dots underline. */
	return 2;
}
659
/*
 * Print header line number @line for @hists: every visible column's
 * header, separated by field_sep or a space.
 *
 * NOTE(review): @span is maintained by fmt->header() and appears to
 * suppress both the separator and the buffer output for columns that
 * are covered by a multi-column header — confirm against the
 * fmt->header() implementations.
 */
static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
			 int line, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int span = 0;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first && !span)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		/* May update @span when this header spans columns. */
		fmt->header(fmt, hpp, hists, line, &span);

		if (!span)
			fprintf(fp, "%s", hpp->buf);
	}
}
683
/*
 * Print the standard (non-hierarchy) headers: nr_header_lines lines of
 * column titles and — unless an explicit field separator is in use — a
 * line of dots underlining each column plus a blank '#' line.
 * Returns the number of lines printed.
 */
static int
hists__fprintf_standard_headers(struct hists *hists,
				struct perf_hpp *hpp,
				FILE *fp)
{
	struct perf_hpp_list *hpp_list = hists->hpp_list;
	struct perf_hpp_fmt *fmt;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int line;

	for (line = 0; line < hpp_list->nr_header_lines; line++) {
		/* first # is displayed one level up */
		if (line)
			fprintf(fp, "# ");
		fprintf_line(hists, hpp, line, fp);
		fprintf(fp, "\n");
	}

	/* With a field separator there is no dots underline to draw. */
	if (sep)
		return hpp_list->nr_header_lines;

	first = true;

	fprintf(fp, "# ");

	hists__for_each_format(hists, fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt, hists))
			continue;

		/* sep is NULL past the early return above, so this
		 * separator is always a single space. */
		if (!first)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		/* Underline the column with dots of its own width. */
		width = fmt->width(fmt, hpp, hists);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	fprintf(fp, "#\n");
	return hpp_list->nr_header_lines + 2;
}
731
732 int hists__fprintf_headers(struct hists *hists, FILE *fp)
733 {
734 char bf[1024];
735 struct perf_hpp dummy_hpp = {
736 .buf = bf,
737 .size = sizeof(bf),
738 };
739
740 fprintf(fp, "# ");
741
742 if (symbol_conf.report_hierarchy)
743 return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
744 else
745 return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
746
747 }
748
/*
 * Print all unfiltered, above-percent-limit entries of @hists to @fp,
 * optionally preceded by the column headers.  @max_rows and @max_cols
 * bound the output (0 = unlimited); @min_pcnt drops entries below that
 * percentage.
 *
 * Returns the number of characters printed.  NOTE(review): on line
 * buffer allocation failure @ret is set to -1, which wraps to
 * SIZE_MAX in the size_t return type — callers must treat it so.
 */
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool use_callchain)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	/* Set up the "[...]" placeholder for filtered callchain hits. */
	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	/* Size the scratch line for the widest row plus color escapes. */
	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		/* Debug aid: dump the thread's maps when a map lookup failed. */
		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
						   MAP__FUNCTION, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}
827
828 size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
829 {
830 int i;
831 size_t ret = 0;
832
833 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
834 const char *name;
835
836 if (stats->nr_events[i] == 0)
837 continue;
838
839 name = perf_event__name(i);
840 if (!strcmp(name, "UNKNOWN"))
841 continue;
842
843 ret += fprintf(fp, "%16s events: %10d\n", name,
844 stats->nr_events[i]);
845 }
846
847 return ret;
848 }