/* tools/perf/util/hist.c */
#include "hist.h"

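/*
 * Three-stage pipeline, as used by the resort helpers below: samples
 * are first accumulated in 'hist' keyed by the configured sort order,
 * optionally merged into 'collapse_hists', and finally re-sorted by
 * count into 'output_hists' for display.
 */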
struct rb_root hist;
struct rb_root collapse_hists;
struct rb_root output_hists;
int callchain;

struct callchain_param callchain_param = {
	.mode		= CHAIN_GRAPH_REL,
	.min_percent	= 0.5
};

unsigned long total;
unsigned long total_mmap;
unsigned long total_comm;
unsigned long total_fork;
unsigned long total_unknown;
unsigned long total_lost;

/*
 * histogram, sorted on item, collects counts
 */

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

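		/*
		 * GNU "a ?: b" extension: prefer the sort key's dedicated
		 * collapse comparator when it provides one, otherwise
		 * fall back to its ordinary cmp.
		 */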
		f = se->collapse ?: se->cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

void collapse__insert_entry(struct hist_entry *he)
{
	struct rb_node **p = &collapse_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

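		/*
		 * Equal under the collapse keys: fold the new entry's
		 * count into the existing one and free the duplicate.
		 */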
		if (!cmp) {
			iter->count += he->count;
			hist_entry__free(he);
			return;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &collapse_hists);
}

void collapse__resort(void)
{
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	next = rb_first(&hist);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
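		/* fetch the successor before the node is unlinked below */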
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &hist);
		collapse__insert_entry(n);
	}
}

/*
 * reverse the map, sort on count.
 */

void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits)
{
	struct rb_node **p = &output_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

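	/* order this entry's accumulated callchains for display before it is placed */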
	if (callchain)
		callchain_param.sort(&he->sorted_chain, &he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

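		/* higher counts go left, so rb_first() yields the hottest entry */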
		if (he->count > iter->count)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &output_hists);
}

void output__resort(u64 total_samples)
{
	struct rb_node *next;
	struct hist_entry *n;
	struct rb_root *tree = &hist;
	u64 min_callchain_hits;

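	/*
	 * Convert the minimum-percent filter into an absolute number of
	 * hits (e.g. 0.5% of 10000 samples = 50 hits), handed down to
	 * the callchain sort via output__insert_entry().
	 */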
	min_callchain_hits = total_samples * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		tree = &collapse_hists;

	next = rb_first(tree);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		/* erase from whichever tree is being drained */
		rb_erase(&n->rb_node, tree);
		output__insert_entry(n, min_callchain_hits);
	}
}
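
/*
 * Usage sketch (illustrative, not part of the original file): once all
 * samples have been inserted into 'hist', a report pass would run the
 * two resorts and then walk 'output_hists' from hottest to coldest.
 * print_entry() is a hypothetical helper named only for this example.
 */
#if 0
static void report__show(u64 total_samples)
{
	struct rb_node *nd;

	collapse__resort();		/* no-op unless the sort keys need it */
	output__resort(total_samples);

	for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
		struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);

		print_entry(he, total_samples);	/* hypothetical */
	}
}
#endif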