git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - Documentation/perf_counter/builtin-stat.c
perf_counter tools: Remove the last nmi bits
[mirror_ubuntu-artful-kernel.git] / Documentation / perf_counter / builtin-stat.c
1 /*
2 * perf stat: /usr/bin/time -alike performance counter statistics utility
3
4 It summarizes the counter events of all tasks (and child tasks),
5 covering all CPUs that the command (or workload) executes on.
6 It only counts the per-task events of the workload started,
7 independent of how many other tasks run on those CPUs.
8
9 Sample output:
10
11 $ perf stat -e 1 -e 3 -e 5 ls -lR /usr/include/ >/dev/null
12
13 Performance counter stats for 'ls':
14
15 163516953 instructions
16 2295 cache-misses
17 2855182 branch-misses
18 *
19 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
20 *
21 * Improvements and fixes by:
22 *
23 * Arjan van de Ven <arjan@linux.intel.com>
24 * Yanmin Zhang <yanmin.zhang@intel.com>
25 * Wu Fengguang <fengguang.wu@intel.com>
26 * Mike Galbraith <efault@gmx.de>
27 * Paul Mackerras <paulus@samba.org>
28 *
29 * Released under the GPL v2. (and only v2, not any later version)
30 */
31
32 #include "perf.h"
33 #include "builtin.h"
34 #include "util/util.h"
35 #include "util/parse-options.h"
36 #include "util/parse-events.h"
37
38 #include <sys/prctl.h>
39
static int system_wide = 0;	/* -a: count on all online CPUs instead of one task */
static int inherit = 1;		/* -i: child tasks inherit the counters */

/*
 * Events counted when the user passes no -e options:
 * four software and four hardware counters.
 */
static __u64 default_event_id[MAX_COUNTERS] = {
	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK),
	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES),
	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS),
	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS),

	EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES),
	EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS),
	EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES),
	EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES),
};

static int default_interval = 100000;		/* -c: default event period */
static int event_count[MAX_COUNTERS];		/* per-event sample period */
static int fd[MAX_NR_CPUS][MAX_COUNTERS];	/* counter fds, [cpu][event] */

static int target_pid = -1;			/* -p: existing pid to stat */
static int nr_cpus = 0;
static unsigned int page_size;

static int scale = 1;				/* -l: scale multiplexed counters */

/* Fallback sample periods, indexed like default_event_id: */
static const unsigned int default_count[] = {
	1000000,
	1000000,
	10000,
	10000,
	1000000,
	10000,
};

/* Per-event results: [0]=count, [1]=time enabled, [2]=time running. */
static __u64 event_res[MAX_COUNTERS][3];
/* 0: ran at full speed, 1: scaled up, (__u64)-1: never counted. */
static __u64 event_scaled[MAX_COUNTERS];

/* TASK_CLOCK runtime in nsecs, used to normalize rates at printout. */
static __u64 runtime_nsecs;
/* Wall-clock duration of the workload in nsecs. */
static __u64 walltime_nsecs;
79
80 static void create_perfstat_counter(int counter)
81 {
82 struct perf_counter_hw_event hw_event;
83
84 memset(&hw_event, 0, sizeof(hw_event));
85 hw_event.config = event_id[counter];
86 hw_event.record_type = 0;
87 hw_event.exclude_kernel = event_mask[counter] & EVENT_MASK_KERNEL;
88 hw_event.exclude_user = event_mask[counter] & EVENT_MASK_USER;
89
90 if (scale)
91 hw_event.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
92 PERF_FORMAT_TOTAL_TIME_RUNNING;
93
94 if (system_wide) {
95 int cpu;
96 for (cpu = 0; cpu < nr_cpus; cpu ++) {
97 fd[cpu][counter] = sys_perf_counter_open(&hw_event, -1, cpu, -1, 0);
98 if (fd[cpu][counter] < 0) {
99 printf("perfstat error: syscall returned with %d (%s)\n",
100 fd[cpu][counter], strerror(errno));
101 exit(-1);
102 }
103 }
104 } else {
105 hw_event.inherit = inherit;
106 hw_event.disabled = 1;
107
108 fd[0][counter] = sys_perf_counter_open(&hw_event, 0, -1, -1, 0);
109 if (fd[0][counter] < 0) {
110 printf("perfstat error: syscall returned with %d (%s)\n",
111 fd[0][counter], strerror(errno));
112 exit(-1);
113 }
114 }
115 }
116
117 /*
118 * Does the counter have nsecs as a unit?
119 */
120 static inline int nsec_counter(int counter)
121 {
122 if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK))
123 return 1;
124 if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK))
125 return 1;
126
127 return 0;
128 }
129
130 /*
131 * Read out the results of a single counter:
132 */
133 static void read_counter(int counter)
134 {
135 __u64 *count, single_count[3];
136 ssize_t res;
137 int cpu, nv;
138 int scaled;
139
140 count = event_res[counter];
141
142 count[0] = count[1] = count[2] = 0;
143
144 nv = scale ? 3 : 1;
145 for (cpu = 0; cpu < nr_cpus; cpu ++) {
146 res = read(fd[cpu][counter], single_count, nv * sizeof(__u64));
147 assert(res == nv * sizeof(__u64));
148
149 count[0] += single_count[0];
150 if (scale) {
151 count[1] += single_count[1];
152 count[2] += single_count[2];
153 }
154 }
155
156 scaled = 0;
157 if (scale) {
158 if (count[2] == 0) {
159 event_scaled[counter] = -1;
160 count[0] = 0;
161 return;
162 }
163
164 if (count[2] < count[1]) {
165 event_scaled[counter] = 1;
166 count[0] = (unsigned long long)
167 ((double)count[0] * count[1] / count[2] + 0.5);
168 }
169 }
170 /*
171 * Save the full runtime - to allow normalization during printout:
172 */
173 if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK))
174 runtime_nsecs = count[0];
175 }
176
177 /*
178 * Print out the results of a single counter:
179 */
180 static void print_counter(int counter)
181 {
182 __u64 *count;
183 int scaled;
184
185 count = event_res[counter];
186 scaled = event_scaled[counter];
187
188 if (scaled == -1) {
189 fprintf(stderr, " %14s %-20s\n",
190 "<not counted>", event_name(counter));
191 return;
192 }
193
194 if (nsec_counter(counter)) {
195 double msecs = (double)count[0] / 1000000;
196
197 fprintf(stderr, " %14.6f %-20s",
198 msecs, event_name(counter));
199 if (event_id[counter] ==
200 EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK)) {
201
202 fprintf(stderr, " # %11.3f CPU utilization factor",
203 (double)count[0] / (double)walltime_nsecs);
204 }
205 } else {
206 fprintf(stderr, " %14Ld %-20s",
207 count[0], event_name(counter));
208 if (runtime_nsecs)
209 fprintf(stderr, " # %11.3f M/sec",
210 (double)count[0]/runtime_nsecs*1000.0);
211 }
212 if (scaled)
213 fprintf(stderr, " (scaled from %.2f%%)",
214 (double) count[2] / count[1] * 100);
215 fprintf(stderr, "\n");
216 }
217
218 static int do_perfstat(int argc, const char **argv)
219 {
220 unsigned long long t0, t1;
221 int counter;
222 int status;
223 int pid;
224
225 if (!system_wide)
226 nr_cpus = 1;
227
228 for (counter = 0; counter < nr_counters; counter++)
229 create_perfstat_counter(counter);
230
231 /*
232 * Enable counters and exec the command:
233 */
234 t0 = rdclock();
235 prctl(PR_TASK_PERF_COUNTERS_ENABLE);
236
237 if ((pid = fork()) < 0)
238 perror("failed to fork");
239 if (!pid) {
240 if (execvp(argv[0], (char **)argv)) {
241 perror(argv[0]);
242 exit(-1);
243 }
244 }
245 while (wait(&status) >= 0)
246 ;
247 prctl(PR_TASK_PERF_COUNTERS_DISABLE);
248 t1 = rdclock();
249
250 walltime_nsecs = t1 - t0;
251
252 fflush(stdout);
253
254 fprintf(stderr, "\n");
255 fprintf(stderr, " Performance counter stats for \'%s\':\n",
256 argv[0]);
257 fprintf(stderr, "\n");
258
259 for (counter = 0; counter < nr_counters; counter++)
260 read_counter(counter);
261
262 for (counter = 0; counter < nr_counters; counter++)
263 print_counter(counter);
264
265
266 fprintf(stderr, "\n");
267 fprintf(stderr, " Wall-clock time elapsed: %12.6f msecs\n",
268 (double)(t1-t0)/1e6);
269 fprintf(stderr, "\n");
270
271 return 0;
272 }
273
/*
 * Deliberately-empty handler: lets SIGINT/SIGALRM/SIGABRT interrupt
 * perf stat without killing it, while the signals still terminate
 * the exec()-ed workload (see the comment in cmd_stat()).
 */
static void skip_signal(int signo)
{
}
277
/* Usage text shown by usage_with_options(): */
static const char * const stat_usage[] = {
	"perf stat [<options>] <command>",
	NULL
};

/* Filled in by create_events_help() with the list of known events: */
static char events_help_msg[EVENTS_HELP_MAX];

/* Command-line option table for parse_options(): */
static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     events_help_msg, parse_events),
	OPT_INTEGER('c', "count", &default_interval,
		    "event period to sample"),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "stat events on existing pid"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('l', "scale", &scale,
		    "scale/normalize counters"),
	OPT_END()
};
300
301 int cmd_stat(int argc, const char **argv, const char *prefix)
302 {
303 int counter;
304
305 page_size = sysconf(_SC_PAGE_SIZE);
306
307 create_events_help(events_help_msg);
308 memcpy(event_id, default_event_id, sizeof(default_event_id));
309
310 argc = parse_options(argc, argv, options, stat_usage, 0);
311 if (!argc)
312 usage_with_options(stat_usage, options);
313
314 if (!nr_counters) {
315 nr_counters = 8;
316 }
317
318 for (counter = 0; counter < nr_counters; counter++) {
319 if (event_count[counter])
320 continue;
321
322 event_count[counter] = default_interval;
323 }
324 nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
325 assert(nr_cpus <= MAX_NR_CPUS);
326 assert(nr_cpus >= 0);
327
328 /*
329 * We dont want to block the signals - that would cause
330 * child tasks to inherit that and Ctrl-C would not work.
331 * What we want is for Ctrl-C to work in the exec()-ed
332 * task, but being ignored by perf stat itself:
333 */
334 signal(SIGINT, skip_signal);
335 signal(SIGALRM, skip_signal);
336 signal(SIGABRT, skip_signal);
337
338 return do_perfstat(argc, argv);
339 }