1 /*
2 * builtin-stat.c
3 *
4 * Builtin stat command: Give a precise performance counter overview
5 * of any workload, CPU or specific PID.
6 *
7 * Sample output:
8
9 $ perf stat ./hackbench 10
10
11 Time: 0.118
12
13 Performance counter stats for './hackbench 10':
14
15 1708.761321 task-clock # 11.037 CPUs utilized
16 41,190 context-switches # 0.024 M/sec
17 6,735 CPU-migrations # 0.004 M/sec
18 17,318 page-faults # 0.010 M/sec
19 5,205,202,243 cycles # 3.046 GHz
20 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle
21 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle
22 2,603,501,247 instructions # 0.50 insns per cycle
23 # 1.48 stalled cycles per insn
24 484,357,498 branches # 283.455 M/sec
25 6,388,934 branch-misses # 1.32% of all branches
26
27 0.154822978 seconds time elapsed
28
29 *
30 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
31 *
32 * Improvements and fixes by:
33 *
34 * Arjan van de Ven <arjan@linux.intel.com>
35 * Yanmin Zhang <yanmin.zhang@intel.com>
36 * Wu Fengguang <fengguang.wu@intel.com>
37 * Mike Galbraith <efault@gmx.de>
38 * Paul Mackerras <paulus@samba.org>
39 * Jaswinder Singh Rajput <jaswinder@kernel.org>
40 *
41 * Released under the GPL v2. (and only v2, not any later version)
42 */
43
44 #include "perf.h"
45 #include "builtin.h"
46 #include "util/util.h"
47 #include "util/parse-options.h"
48 #include "util/parse-events.h"
49 #include "util/event.h"
50 #include "util/evlist.h"
51 #include "util/evsel.h"
52 #include "util/debug.h"
53 #include "util/color.h"
54 #include "util/header.h"
55 #include "util/cpumap.h"
56 #include "util/thread.h"
57 #include "util/thread_map.h"
58
59 #include <sys/prctl.h>
60 #include <math.h>
61 #include <locale.h>
62
63 #define DEFAULT_SEPARATOR " "
64 #define CNTR_NOT_SUPPORTED "<not supported>"
65 #define CNTR_NOT_COUNTED "<not counted>"
66
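/*
 * These ten events are what a plain "perf stat <cmd>" measures, and they
 * map one to one onto the sample output in the header comment above:
 * task-clock, context-switches, CPU-migrations, page-faults, cycles,
 * stalled-cycles-frontend, stalled-cycles-backend, instructions,
 * branches and branch-misses, printed in this order.
 */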
67 static struct perf_event_attr default_attrs[] = {
68
69 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
70 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
71 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
72 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
73
74 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
75 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
76 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
77 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
78 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
79 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
80
81 };
82
83 /*
84 * Detailed stats (-d), covering the L1 and last level data caches:
85 */
86 static struct perf_event_attr detailed_attrs[] = {
87
88 { .type = PERF_TYPE_HW_CACHE,
89 .config =
90 PERF_COUNT_HW_CACHE_L1D << 0 |
91 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
92 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
93
94 { .type = PERF_TYPE_HW_CACHE,
95 .config =
96 PERF_COUNT_HW_CACHE_L1D << 0 |
97 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
98 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
99
100 { .type = PERF_TYPE_HW_CACHE,
101 .config =
102 PERF_COUNT_HW_CACHE_LL << 0 |
103 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
104 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
105
106 { .type = PERF_TYPE_HW_CACHE,
107 .config =
108 PERF_COUNT_HW_CACHE_LL << 0 |
109 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
110 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
111 };
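/*
 * PERF_TYPE_HW_CACHE events pack three fields into attr.config:
 * the cache (bits 0-7), the operation (bits 8-15) and the result
 * (bits 16-23), i.e. config = id | (op << 8) | (result << 16).
 * For example the second entry above selects L1 data cache read
 * misses: PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16).
 */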
112
113 /*
114 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
115 */
116 static struct perf_event_attr very_detailed_attrs[] = {
117
118 { .type = PERF_TYPE_HW_CACHE,
119 .config =
120 PERF_COUNT_HW_CACHE_L1I << 0 |
121 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
122 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
123
124 { .type = PERF_TYPE_HW_CACHE,
125 .config =
126 PERF_COUNT_HW_CACHE_L1I << 0 |
127 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
128 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
129
130 { .type = PERF_TYPE_HW_CACHE,
131 .config =
132 PERF_COUNT_HW_CACHE_DTLB << 0 |
133 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
134 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
135
136 { .type = PERF_TYPE_HW_CACHE,
137 .config =
138 PERF_COUNT_HW_CACHE_DTLB << 0 |
139 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
140 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
141
142 { .type = PERF_TYPE_HW_CACHE,
143 .config =
144 PERF_COUNT_HW_CACHE_ITLB << 0 |
145 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
146 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
147
148 { .type = PERF_TYPE_HW_CACHE,
149 .config =
150 PERF_COUNT_HW_CACHE_ITLB << 0 |
151 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
152 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
153
154 };
155
156 /*
157 * Very, very detailed stats (-d -d -d), adding prefetch events:
158 */
159 static struct perf_event_attr very_very_detailed_attrs[] = {
160
161 { .type = PERF_TYPE_HW_CACHE,
162 .config =
163 PERF_COUNT_HW_CACHE_L1D << 0 |
164 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
165 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
166
167 { .type = PERF_TYPE_HW_CACHE,
168 .config =
169 PERF_COUNT_HW_CACHE_L1D << 0 |
170 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
171 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
172 };
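/*
 * -d/--detailed can be given up to three times; each level appends one
 * more of the attribute arrays above on top of the default set - see
 * add_default_attributes() below.
 */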
173
174
175
176 static struct perf_evlist *evsel_list;
177
178 static struct perf_target target = {
179 .uid = UINT_MAX,
180 };
181
182 static int run_idx = 0;
183 static int run_count = 1;
184 static bool no_inherit = false;
185 static bool scale = true;
186 static bool no_aggr = false;
187 static pid_t child_pid = -1;
188 static bool null_run = false;
189 static int detailed_run = 0;
190 static bool sync_run = false;
191 static bool big_num = true;
192 static int big_num_opt = -1;
193 static const char *csv_sep = NULL;
194 static bool csv_output = false;
195 static bool group = false;
196 static const char *output_name = NULL;
197 static FILE *output = NULL;
198 static int output_fd;
199
200 static volatile int done = 0;
201
202 struct stats
203 {
204 double n, mean, M2;
205 };
206
207 struct perf_stat {
208 struct stats res_stats[3];
209 };
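/*
 * res_stats[] accumulates, across the -r/--repeat runs, the three values
 * kept per scaled counter: [0] the count itself, [1] the time the event
 * was enabled and [2] the time it was actually running on a PMU.
 * [1] and [2] feed the "[xx.xx%]" multiplexing figure, [0] the printed
 * average and its noise.
 */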
210
211 static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
212 {
213 evsel->priv = zalloc(sizeof(struct perf_stat));
214 return evsel->priv == NULL ? -ENOMEM : 0;
215 }
216
217 static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
218 {
219 free(evsel->priv);
220 evsel->priv = NULL;
221 }
222
223 static void update_stats(struct stats *stats, u64 val)
224 {
225 double delta;
226
227 stats->n++;
228 delta = val - stats->mean;
229 stats->mean += delta / stats->n;
230 stats->M2 += delta*(val - stats->mean);
231 }
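/*
 * update_stats() is Welford's online algorithm: it folds one sample at a
 * time into a running mean and M2 (the sum of squared deviations) without
 * storing the samples.  Feeding it 1, 2, 3, for instance, leaves n = 3,
 * mean = 2 and M2 = 2, so M2 / (n - 1) = 1, the sample variance.
 */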
232
233 static double avg_stats(struct stats *stats)
234 {
235 return stats->mean;
236 }
237
238 /*
239 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
240 *
241 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
242 * s^2 = -------------------------------
243 *                  n - 1
244 *
245 * http://en.wikipedia.org/wiki/Stddev
246 *
247 * The std dev of the mean is related to the std dev by:
248 *
249 *             s
250 * s_mean = -------
251 *          sqrt(n)
252 *
253 */
254 static double stddev_stats(struct stats *stats)
255 {
256 double variance, variance_mean;
257
258 if (!stats->n)
259 return 0.0;
260
261 variance = stats->M2 / (stats->n - 1);
262 variance_mean = variance / stats->n;
263
264 return sqrt(variance_mean);
265 }
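/*
 * stddev_stats() returns the standard deviation of the mean: M2 gives the
 * sample variance, which is then divided by n as per the comment above.
 * With -r/--repeat this is what print_noise_pct() turns into the
 * "( +- x.xx% )" column, relative to the printed average.
 */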
266
267 static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
268 static struct stats runtime_cycles_stats[MAX_NR_CPUS];
269 static struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS];
270 static struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS];
271 static struct stats runtime_branches_stats[MAX_NR_CPUS];
272 static struct stats runtime_cacherefs_stats[MAX_NR_CPUS];
273 static struct stats runtime_l1_dcache_stats[MAX_NR_CPUS];
274 static struct stats runtime_l1_icache_stats[MAX_NR_CPUS];
275 static struct stats runtime_ll_cache_stats[MAX_NR_CPUS];
276 static struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
277 static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
278 static struct stats walltime_nsecs_stats;
279
280 static int create_perf_stat_counter(struct perf_evsel *evsel,
281 struct perf_evsel *first)
282 {
283 struct perf_event_attr *attr = &evsel->attr;
284 struct xyarray *group_fd = NULL;
285 bool exclude_guest_missing = false;
286 int ret;
287
288 if (group && evsel != first)
289 group_fd = first->fd;
290
291 if (scale)
292 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
293 PERF_FORMAT_TOTAL_TIME_RUNNING;
294
295 attr->inherit = !no_inherit;
296
297 retry:
298 if (exclude_guest_missing)
299 evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
300
301 if (perf_target__has_cpu(&target)) {
302 ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
303 group, group_fd);
304 if (ret)
305 goto check_ret;
306 return 0;
307 }
308
309 if (!perf_target__has_task(&target) && (!group || evsel == first)) {
310 attr->disabled = 1;
311 attr->enable_on_exec = 1;
312 }
313
314 ret = perf_evsel__open_per_thread(evsel, evsel_list->threads,
315 group, group_fd);
316 if (!ret)
317 return 0;
318 /* fall through */
319 check_ret:
320 if (ret && errno == EINVAL) {
321 if (!exclude_guest_missing &&
322 (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
323 pr_debug("Old kernel, cannot exclude "
324 "guest or host samples.\n");
325 exclude_guest_missing = true;
326 goto retry;
327 }
328 }
329 return ret;
330 }
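/*
 * Note the retry dance above: the first open attempt keeps whatever
 * exclude_guest/exclude_host bits the event parser set.  Kernels that
 * predate those attr bits reject the attr with EINVAL, in which case both
 * bits are cleared and the open is retried once.
 */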
331
332 /*
333 * Does the counter have nsecs as a unit?
334 */
335 static inline int nsec_counter(struct perf_evsel *evsel)
336 {
337 if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
338 perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
339 return 1;
340
341 return 0;
342 }
343
344 /*
345 * Update various tracking values we maintain to print
346 * more semantic information such as miss/hit ratios,
347 * instruction rates, etc:
348 */
349 static void update_shadow_stats(struct perf_evsel *counter, u64 *count)
350 {
351 if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
352 update_stats(&runtime_nsecs_stats[0], count[0]);
353 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
354 update_stats(&runtime_cycles_stats[0], count[0]);
355 else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
356 update_stats(&runtime_stalled_cycles_front_stats[0], count[0]);
357 else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
358 update_stats(&runtime_stalled_cycles_back_stats[0], count[0]);
359 else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
360 update_stats(&runtime_branches_stats[0], count[0]);
361 else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
362 update_stats(&runtime_cacherefs_stats[0], count[0]);
363 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
364 update_stats(&runtime_l1_dcache_stats[0], count[0]);
365 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
366 update_stats(&runtime_l1_icache_stats[0], count[0]);
367 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
368 update_stats(&runtime_ll_cache_stats[0], count[0]);
369 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
370 update_stats(&runtime_dtlb_cache_stats[0], count[0]);
371 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
372 update_stats(&runtime_itlb_cache_stats[0], count[0]);
373 }
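/*
 * These "shadow" stats squirrel away the readings of the events that later
 * serve as denominators, so that abs_printout() and nsec_printout() can
 * print derived ratios: insns per cycle, GHz, branch-miss and cache-miss
 * percentages, stalled-cycle percentages and M/sec rates.
 */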
374
375 /*
376 * Read out the results of a single counter:
377 * aggregate counts across CPUs in system-wide mode
378 */
379 static int read_counter_aggr(struct perf_evsel *counter)
380 {
381 struct perf_stat *ps = counter->priv;
382 u64 *count = counter->counts->aggr.values;
383 int i;
384
385 if (__perf_evsel__read(counter, evsel_list->cpus->nr,
386 evsel_list->threads->nr, scale) < 0)
387 return -1;
388
389 for (i = 0; i < 3; i++)
390 update_stats(&ps->res_stats[i], count[i]);
391
392 if (verbose) {
393 fprintf(output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
394 perf_evsel__name(counter), count[0], count[1], count[2]);
395 }
396
397 /*
398 * Save the full runtime - to allow normalization during printout:
399 */
400 update_shadow_stats(counter, count);
401
402 return 0;
403 }
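/*
 * With scaling enabled, count[0], count[1] and count[2] hold the counter
 * value plus the time_enabled and time_running fields the kernel reports
 * (requested via read_format in create_perf_stat_counter()), which is why
 * three res_stats[] entries are kept per counter.
 */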
404
405 /*
406 * Read out the results of a single counter:
407 * do not aggregate counts across CPUs in system-wide mode
408 */
409 static int read_counter(struct perf_evsel *counter)
410 {
411 u64 *count;
412 int cpu;
413
414 for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) {
415 if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0)
416 return -1;
417
418 count = counter->counts->cpu[cpu].values;
419
420 update_shadow_stats(counter, count);
421 }
422
423 return 0;
424 }
425
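/*
 * run_perf_stat() forks the workload but holds it on a pipe-based
 * handshake: the child signals readiness by closing child_ready_pipe
 * (the dummy execvp("") fails on purpose and only warms up the PLT),
 * then blocks reading go_pipe.  The parent waits for that, opens all the
 * counters, takes the t0 timestamp and closes go_pipe[1], which releases
 * the child into the real execvp() of the workload.
 */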
426 static int run_perf_stat(int argc __used, const char **argv)
427 {
428 unsigned long long t0, t1;
429 struct perf_evsel *counter, *first;
430 int status = 0;
431 int child_ready_pipe[2], go_pipe[2];
432 const bool forks = (argc > 0);
433 char buf;
434
435 if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
436 perror("failed to create pipes");
437 exit(1);
438 }
439
440 if (forks) {
441 if ((child_pid = fork()) < 0)
442 perror("failed to fork");
443
444 if (!child_pid) {
445 close(child_ready_pipe[0]);
446 close(go_pipe[1]);
447 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
448
449 /*
450 * Do a dummy execvp to get the PLT entry resolved,
451 * so we avoid the resolver overhead on the real
452 * execvp call.
453 */
454 execvp("", (char **)argv);
455
456 /*
457 * Tell the parent we're ready to go
458 */
459 close(child_ready_pipe[1]);
460
461 /*
462 * Wait until the parent tells us to go.
463 */
464 if (read(go_pipe[0], &buf, 1) == -1)
465 perror("unable to read pipe");
466
467 execvp(argv[0], (char **)argv);
468
469 perror(argv[0]);
470 exit(-1);
471 }
472
473 if (perf_target__none(&target))
474 evsel_list->threads->map[0] = child_pid;
475
476 /*
477 * Wait for the child to be ready to exec.
478 */
479 close(child_ready_pipe[1]);
480 close(go_pipe[0]);
481 if (read(child_ready_pipe[0], &buf, 1) == -1)
482 perror("unable to read pipe");
483 close(child_ready_pipe[0]);
484 }
485
486 first = list_entry(evsel_list->entries.next, struct perf_evsel, node);
487
488 list_for_each_entry(counter, &evsel_list->entries, node) {
489 if (create_perf_stat_counter(counter, first) < 0) {
490 /*
491 * PPC returns ENXIO for HW counters until 2.6.37
492 * (behavior changed with commit b0a873e).
493 */
494 if (errno == EINVAL || errno == ENOSYS ||
495 errno == ENOENT || errno == EOPNOTSUPP ||
496 errno == ENXIO) {
497 if (verbose)
498 ui__warning("%s event is not supported by the kernel.\n",
499 perf_evsel__name(counter));
500 counter->supported = false;
501 continue;
502 }
503
504 if (errno == EPERM || errno == EACCES) {
505 error("You may not have permission to collect %sstats.\n"
506 "\t Consider tweaking"
507 " /proc/sys/kernel/perf_event_paranoid or running as root.",
508 target.system_wide ? "system-wide " : "");
509 } else {
510 error("open_counter returned with %d (%s). "
511 "/bin/dmesg may provide additional information.\n",
512 errno, strerror(errno));
513 }
514 if (child_pid != -1)
515 kill(child_pid, SIGTERM);
516 die("Not all events could be opened.\n");
517 return -1;
518 }
519 counter->supported = true;
520 }
521
522 if (perf_evlist__set_filters(evsel_list)) {
523 error("failed to set filter with %d (%s)\n", errno,
524 strerror(errno));
525 return -1;
526 }
527
528 /*
529 * Enable counters and exec the command:
530 */
531 t0 = rdclock();
532
533 if (forks) {
534 close(go_pipe[1]);
535 wait(&status);
536 if (WIFSIGNALED(status))
537 psignal(WTERMSIG(status), argv[0]);
538 } else {
539 while (!done) sleep(1);
540 }
541
542 t1 = rdclock();
543
544 update_stats(&walltime_nsecs_stats, t1 - t0);
545
546 if (no_aggr) {
547 list_for_each_entry(counter, &evsel_list->entries, node) {
548 read_counter(counter);
549 perf_evsel__close_fd(counter, evsel_list->cpus->nr, 1);
550 }
551 } else {
552 list_for_each_entry(counter, &evsel_list->entries, node) {
553 read_counter_aggr(counter);
554 perf_evsel__close_fd(counter, evsel_list->cpus->nr,
555 evsel_list->threads->nr);
556 }
557 }
558
559 return WEXITSTATUS(status);
560 }
561
562 static void print_noise_pct(double total, double avg)
563 {
564 double pct = 0.0;
565
566 if (avg)
567 pct = 100.0*total/avg;
568
569 if (csv_output)
570 fprintf(output, "%s%.2f%%", csv_sep, pct);
571 else if (pct)
572 fprintf(output, " ( +-%6.2f%% )", pct);
573 }
574
575 static void print_noise(struct perf_evsel *evsel, double avg)
576 {
577 struct perf_stat *ps;
578
579 if (run_count == 1)
580 return;
581
582 ps = evsel->priv;
583 print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
584 }
585
586 static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
587 {
588 double msecs = avg / 1e6;
589 char cpustr[16] = { '\0', };
590 const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-25s";
591
592 if (no_aggr)
593 sprintf(cpustr, "CPU%*d%s",
594 csv_output ? 0 : -4,
595 evsel_list->cpus->map[cpu], csv_sep);
596
597 fprintf(output, fmt, cpustr, msecs, csv_sep, perf_evsel__name(evsel));
598
599 if (evsel->cgrp)
600 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
601
602 if (csv_output)
603 return;
604
605 if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
606 fprintf(output, " # %8.3f CPUs utilized ",
607 avg / avg_stats(&walltime_nsecs_stats));
608 else
609 fprintf(output, " ");
610 }
611
612 /* used for get_ratio_color() */
613 enum grc_type {
614 GRC_STALLED_CYCLES_FE,
615 GRC_STALLED_CYCLES_BE,
616 GRC_CACHE_MISSES,
617 GRC_MAX_NR
618 };
619
620 static const char *get_ratio_color(enum grc_type type, double ratio)
621 {
622 static const double grc_table[GRC_MAX_NR][3] = {
623 [GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
624 [GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
625 [GRC_CACHE_MISSES] = { 20.0, 10.0, 5.0 },
626 };
627 const char *color = PERF_COLOR_NORMAL;
628
629 if (ratio > grc_table[type][0])
630 color = PERF_COLOR_RED;
631 else if (ratio > grc_table[type][1])
632 color = PERF_COLOR_MAGENTA;
633 else if (ratio > grc_table[type][2])
634 color = PERF_COLOR_YELLOW;
635
636 return color;
637 }
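/*
 * The thresholds above are read highest first: a ratio over the first
 * column is printed red, over the second magenta, over the third yellow,
 * anything lower in the normal colour.  E.g. a frontend stall percentage
 * above 50% shows up red, above 30% magenta, above 10% yellow.
 */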
638
639 static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg)
640 {
641 double total, ratio = 0.0;
642 const char *color;
643
644 total = avg_stats(&runtime_cycles_stats[cpu]);
645
646 if (total)
647 ratio = avg / total * 100.0;
648
649 color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);
650
651 fprintf(output, " # ");
652 color_fprintf(output, color, "%6.2f%%", ratio);
653 fprintf(output, " frontend cycles idle ");
654 }
655
656 static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __used, double avg)
657 {
658 double total, ratio = 0.0;
659 const char *color;
660
661 total = avg_stats(&runtime_cycles_stats[cpu]);
662
663 if (total)
664 ratio = avg / total * 100.0;
665
666 color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);
667
668 fprintf(output, " # ");
669 color_fprintf(output, color, "%6.2f%%", ratio);
670 fprintf(output, " backend cycles idle ");
671 }
672
673 static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg)
674 {
675 double total, ratio = 0.0;
676 const char *color;
677
678 total = avg_stats(&runtime_branches_stats[cpu]);
679
680 if (total)
681 ratio = avg / total * 100.0;
682
683 color = get_ratio_color(GRC_CACHE_MISSES, ratio);
684
685 fprintf(output, " # ");
686 color_fprintf(output, color, "%6.2f%%", ratio);
687 fprintf(output, " of all branches ");
688 }
689
690 static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
691 {
692 double total, ratio = 0.0;
693 const char *color;
694
695 total = avg_stats(&runtime_l1_dcache_stats[cpu]);
696
697 if (total)
698 ratio = avg / total * 100.0;
699
700 color = get_ratio_color(GRC_CACHE_MISSES, ratio);
701
702 fprintf(output, " # ");
703 color_fprintf(output, color, "%6.2f%%", ratio);
704 fprintf(output, " of all L1-dcache hits ");
705 }
706
707 static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
708 {
709 double total, ratio = 0.0;
710 const char *color;
711
712 total = avg_stats(&runtime_l1_icache_stats[cpu]);
713
714 if (total)
715 ratio = avg / total * 100.0;
716
717 color = get_ratio_color(GRC_CACHE_MISSES, ratio);
718
719 fprintf(output, " # ");
720 color_fprintf(output, color, "%6.2f%%", ratio);
721 fprintf(output, " of all L1-icache hits ");
722 }
723
724 static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
725 {
726 double total, ratio = 0.0;
727 const char *color;
728
729 total = avg_stats(&runtime_dtlb_cache_stats[cpu]);
730
731 if (total)
732 ratio = avg / total * 100.0;
733
734 color = get_ratio_color(GRC_CACHE_MISSES, ratio);
735
736 fprintf(output, " # ");
737 color_fprintf(output, color, "%6.2f%%", ratio);
738 fprintf(output, " of all dTLB cache hits ");
739 }
740
741 static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
742 {
743 double total, ratio = 0.0;
744 const char *color;
745
746 total = avg_stats(&runtime_itlb_cache_stats[cpu]);
747
748 if (total)
749 ratio = avg / total * 100.0;
750
751 color = get_ratio_color(GRC_CACHE_MISSES, ratio);
752
753 fprintf(output, " # ");
754 color_fprintf(output, color, "%6.2f%%", ratio);
755 fprintf(output, " of all iTLB cache hits ");
756 }
757
758 static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
759 {
760 double total, ratio = 0.0;
761 const char *color;
762
763 total = avg_stats(&runtime_ll_cache_stats[cpu]);
764
765 if (total)
766 ratio = avg / total * 100.0;
767
768 color = get_ratio_color(GRC_CACHE_MISSES, ratio);
769
770 fprintf(output, " # ");
771 color_fprintf(output, color, "%6.2f%%", ratio);
772 fprintf(output, " of all LL-cache hits ");
773 }
774
775 static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
776 {
777 double total, ratio = 0.0;
778 char cpustr[16] = { '\0', };
779 const char *fmt;
780
781 if (csv_output)
782 fmt = "%s%.0f%s%s";
783 else if (big_num)
784 fmt = "%s%'18.0f%s%-25s";
785 else
786 fmt = "%s%18.0f%s%-25s";
787
788 if (no_aggr)
789 sprintf(cpustr, "CPU%*d%s",
790 csv_output ? 0 : -4,
791 evsel_list->cpus->map[cpu], csv_sep);
792 else
793 cpu = 0;
794
795 fprintf(output, fmt, cpustr, avg, csv_sep, perf_evsel__name(evsel));
796
797 if (evsel->cgrp)
798 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
799
800 if (csv_output)
801 return;
802
803 if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
804 total = avg_stats(&runtime_cycles_stats[cpu]);
805
806 if (total)
807 ratio = avg / total;
808
809 fprintf(output, " # %5.2f insns per cycle ", ratio);
810
811 total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]);
812 total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu]));
813
814 if (total && avg) {
815 ratio = total / avg;
816 fprintf(output, "\n # %5.2f stalled cycles per insn", ratio);
817 }
818
819 } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) &&
820 runtime_branches_stats[cpu].n != 0) {
821 print_branch_misses(cpu, evsel, avg);
822 } else if (
823 evsel->attr.type == PERF_TYPE_HW_CACHE &&
824 evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D |
825 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
826 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
827 runtime_l1_dcache_stats[cpu].n != 0) {
828 print_l1_dcache_misses(cpu, evsel, avg);
829 } else if (
830 evsel->attr.type == PERF_TYPE_HW_CACHE &&
831 evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I |
832 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
833 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
834 runtime_l1_icache_stats[cpu].n != 0) {
835 print_l1_icache_misses(cpu, evsel, avg);
836 } else if (
837 evsel->attr.type == PERF_TYPE_HW_CACHE &&
838 evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
839 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
840 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
841 runtime_dtlb_cache_stats[cpu].n != 0) {
842 print_dtlb_cache_misses(cpu, evsel, avg);
843 } else if (
844 evsel->attr.type == PERF_TYPE_HW_CACHE &&
845 evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
846 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
847 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
848 runtime_itlb_cache_stats[cpu].n != 0) {
849 print_itlb_cache_misses(cpu, evsel, avg);
850 } else if (
851 evsel->attr.type == PERF_TYPE_HW_CACHE &&
852 evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL |
853 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
854 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
855 runtime_ll_cache_stats[cpu].n != 0) {
856 print_ll_cache_misses(cpu, evsel, avg);
857 } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) &&
858 runtime_cacherefs_stats[cpu].n != 0) {
859 total = avg_stats(&runtime_cacherefs_stats[cpu]);
860
861 if (total)
862 ratio = avg * 100 / total;
863
864 fprintf(output, " # %8.3f %% of all cache refs ", ratio);
865
866 } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
867 print_stalled_cycles_frontend(cpu, evsel, avg);
868 } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
869 print_stalled_cycles_backend(cpu, evsel, avg);
870 } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
871 total = avg_stats(&runtime_nsecs_stats[cpu]);
872
873 if (total)
874 ratio = 1.0 * avg / total;
875
876 fprintf(output, " # %8.3f GHz ", ratio);
877 } else if (runtime_nsecs_stats[cpu].n != 0) {
878 char unit = 'M';
879
880 total = avg_stats(&runtime_nsecs_stats[cpu]);
881
882 if (total)
883 ratio = 1000.0 * avg / total;
884 if (ratio < 0.001) {
885 ratio *= 1000;
886 unit = 'K';
887 }
888
889 fprintf(output, " # %8.3f %c/sec ", ratio, unit);
890 } else {
891 fprintf(output, " ");
892 }
893 }
894
895 /*
896 * Print out the results of a single counter:
897 * aggregated counts in system-wide mode
898 */
899 static void print_counter_aggr(struct perf_evsel *counter)
900 {
901 struct perf_stat *ps = counter->priv;
902 double avg = avg_stats(&ps->res_stats[0]);
903 int scaled = counter->counts->scaled;
904
905 if (scaled == -1) {
906 fprintf(output, "%*s%s%*s",
907 csv_output ? 0 : 18,
908 counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
909 csv_sep,
910 csv_output ? 0 : -24,
911 perf_evsel__name(counter));
912
913 if (counter->cgrp)
914 fprintf(output, "%s%s", csv_sep, counter->cgrp->name);
915
916 fputc('\n', output);
917 return;
918 }
919
920 if (nsec_counter(counter))
921 nsec_printout(-1, counter, avg);
922 else
923 abs_printout(-1, counter, avg);
924
925 print_noise(counter, avg);
926
927 if (csv_output) {
928 fputc('\n', output);
929 return;
930 }
931
932 if (scaled) {
933 double avg_enabled, avg_running;
934
935 avg_enabled = avg_stats(&ps->res_stats[1]);
936 avg_running = avg_stats(&ps->res_stats[2]);
937
938 fprintf(output, " [%5.2f%%]", 100 * avg_running / avg_enabled);
939 }
940 fprintf(output, "\n");
941 }
942
943 /*
944 * Print out the results of a single counter:
945 * does not use aggregated count in system-wide
946 */
947 static void print_counter(struct perf_evsel *counter)
948 {
949 u64 ena, run, val;
950 int cpu;
951
952 for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) {
953 val = counter->counts->cpu[cpu].val;
954 ena = counter->counts->cpu[cpu].ena;
955 run = counter->counts->cpu[cpu].run;
956 if (run == 0 || ena == 0) {
957 fprintf(output, "CPU%*d%s%*s%s%*s",
958 csv_output ? 0 : -4,
959 evsel_list->cpus->map[cpu], csv_sep,
960 csv_output ? 0 : 18,
961 counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
962 csv_sep,
963 csv_output ? 0 : -24,
964 perf_evsel__name(counter));
965
966 if (counter->cgrp)
967 fprintf(output, "%s%s",
968 csv_sep, counter->cgrp->name);
969
970 fputc('\n', output);
971 continue;
972 }
973
974 if (nsec_counter(counter))
975 nsec_printout(cpu, counter, val);
976 else
977 abs_printout(cpu, counter, val);
978
979 if (!csv_output) {
980 print_noise(counter, 1.0);
981
982 if (run != ena)
983 fprintf(output, " (%.2f%%)",
984 100.0 * run / ena);
985 }
986 fputc('\n', output);
987 }
988 }
989
990 static void print_stat(int argc, const char **argv)
991 {
992 struct perf_evsel *counter;
993 int i;
994
995 fflush(stdout);
996
997 if (!csv_output) {
998 fprintf(output, "\n");
999 fprintf(output, " Performance counter stats for ");
1000 if (!perf_target__has_task(&target)) {
1001 fprintf(output, "\'%s", argv[0]);
1002 for (i = 1; i < argc; i++)
1003 fprintf(output, " %s", argv[i]);
1004 } else if (target.pid)
1005 fprintf(output, "process id \'%s", target.pid);
1006 else
1007 fprintf(output, "thread id \'%s", target.tid);
1008
1009 fprintf(output, "\'");
1010 if (run_count > 1)
1011 fprintf(output, " (%d runs)", run_count);
1012 fprintf(output, ":\n\n");
1013 }
1014
1015 if (no_aggr) {
1016 list_for_each_entry(counter, &evsel_list->entries, node)
1017 print_counter(counter);
1018 } else {
1019 list_for_each_entry(counter, &evsel_list->entries, node)
1020 print_counter_aggr(counter);
1021 }
1022
1023 if (!csv_output) {
1024 if (!null_run)
1025 fprintf(output, "\n");
1026 fprintf(output, " %17.9f seconds time elapsed",
1027 avg_stats(&walltime_nsecs_stats)/1e9);
1028 if (run_count > 1) {
1029 fprintf(output, " ");
1030 print_noise_pct(stddev_stats(&walltime_nsecs_stats),
1031 avg_stats(&walltime_nsecs_stats));
1032 }
1033 fprintf(output, "\n\n");
1034 }
1035 }
1036
1037 static volatile int signr = -1;
1038
1039 static void skip_signal(int signo)
1040 {
1041 if (child_pid == -1)
1042 done = 1;
1043
1044 signr = signo;
1045 }
1046
1047 static void sig_atexit(void)
1048 {
1049 if (child_pid != -1)
1050 kill(child_pid, SIGTERM);
1051
1052 if (signr == -1)
1053 return;
1054
1055 signal(signr, SIG_DFL);
1056 kill(getpid(), signr);
1057 }
1058
1059 static const char * const stat_usage[] = {
1060 "perf stat [<options>] [<command>]",
1061 NULL
1062 };
1063
1064 static int stat__set_big_num(const struct option *opt __used,
1065 const char *s __used, int unset)
1066 {
1067 big_num_opt = unset ? 0 : 1;
1068 return 0;
1069 }
1070
1071 static bool append_file;
1072
1073 static const struct option options[] = {
1074 OPT_CALLBACK('e', "event", &evsel_list, "event",
1075 "event selector. use 'perf list' to list available events",
1076 parse_events_option),
1077 OPT_CALLBACK(0, "filter", &evsel_list, "filter",
1078 "event filter", parse_filter),
1079 OPT_BOOLEAN('i', "no-inherit", &no_inherit,
1080 "child tasks do not inherit counters"),
1081 OPT_STRING('p', "pid", &target.pid, "pid",
1082 "stat events on existing process id"),
1083 OPT_STRING('t', "tid", &target.tid, "tid",
1084 "stat events on existing thread id"),
1085 OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
1086 "system-wide collection from all CPUs"),
1087 OPT_BOOLEAN('g', "group", &group,
1088 "put the counters into a counter group"),
1089 OPT_BOOLEAN('c', "scale", &scale,
1090 "scale/normalize counters"),
1091 OPT_INCR('v', "verbose", &verbose,
1092 "be more verbose (show counter open errors, etc)"),
1093 OPT_INTEGER('r', "repeat", &run_count,
1094 "repeat command and print average + stddev (max: 100)"),
1095 OPT_BOOLEAN('n', "null", &null_run,
1096 "null run - dont start any counters"),
1097 OPT_INCR('d', "detailed", &detailed_run,
1098 "detailed run - start a lot of events"),
1099 OPT_BOOLEAN('S', "sync", &sync_run,
1100 "call sync() before starting a run"),
1101 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
1102 "print large numbers with thousands\' separators",
1103 stat__set_big_num),
1104 OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
1105 "list of cpus to monitor in system-wide"),
1106 OPT_BOOLEAN('A', "no-aggr", &no_aggr,
1107 "disable CPU count aggregation"),
1108 OPT_STRING('x', "field-separator", &csv_sep, "separator",
1109 "print counts with custom separator"),
1110 OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
1111 "monitor event in cgroup name only",
1112 parse_cgroups),
1113 OPT_STRING('o', "output", &output_name, "file",
1114 "output file name"),
1115 OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
1116 OPT_INTEGER(0, "log-fd", &output_fd,
1117 "log output to fd, instead of stderr"),
1118 OPT_END()
1119 };
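/*
 * Typical invocations wiring these options together (illustrative):
 *
 *   perf stat -- ./hackbench 10             # default event set
 *   perf stat -r 5 -d ./hackbench 10        # 5 runs, detailed events, +- noise
 *   perf stat -a -A -C 0,1 sleep 1          # per-cpu counts on CPUs 0 and 1
 *   perf stat -x, -o out.csv --append make  # CSV output appended to a file
 *
 * The workload command follows the options; with -p or -t an existing
 * task can be measured instead.
 */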
1120
1121 /*
1122 * Add default attributes, if there were no attributes specified or
1123 * if -d/--detailed, -d -d or -d -d -d is used:
1124 */
1125 static int add_default_attributes(void)
1126 {
1127 /* Set attrs if no event is selected and !null_run: */
1128 if (null_run)
1129 return 0;
1130
1131 if (!evsel_list->nr_entries) {
1132 if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0)
1133 return -1;
1134 }
1135
1136 /* Detailed events get appended to the event list: */
1137
1138 if (detailed_run < 1)
1139 return 0;
1140
1141 /* Append detailed run extra attributes: */
1142 if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
1143 return -1;
1144
1145 if (detailed_run < 2)
1146 return 0;
1147
1148 /* Append very detailed run extra attributes: */
1149 if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
1150 return -1;
1151
1152 if (detailed_run < 3)
1153 return 0;
1154
1155 /* Append very, very detailed run extra attributes: */
1156 return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
1157 }
1158
1159 int cmd_stat(int argc, const char **argv, const char *prefix __used)
1160 {
1161 struct perf_evsel *pos;
1162 int status = -ENOMEM;
1163 const char *mode;
1164
1165 setlocale(LC_ALL, "");
1166
1167 evsel_list = perf_evlist__new(NULL, NULL);
1168 if (evsel_list == NULL)
1169 return -ENOMEM;
1170
1171 argc = parse_options(argc, argv, options, stat_usage,
1172 PARSE_OPT_STOP_AT_NON_OPTION);
1173
1174 output = stderr;
1175 if (output_name && strcmp(output_name, "-"))
1176 output = NULL;
1177
1178 if (output_name && output_fd) {
1179 fprintf(stderr, "cannot use both --output and --log-fd\n");
1180 usage_with_options(stat_usage, options);
1181 }
1182
1183 if (output_fd < 0) {
1184 fprintf(stderr, "argument to --log-fd must be > 0\n");
1185 usage_with_options(stat_usage, options);
1186 }
1187
1188 if (!output) {
1189 struct timespec tm;
1190 mode = append_file ? "a" : "w";
1191
1192 output = fopen(output_name, mode);
1193 if (!output) {
1194 perror("failed to create output file");
1195 exit(-1);
1196 }
1197 clock_gettime(CLOCK_REALTIME, &tm);
1198 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
1199 } else if (output_fd > 0) {
1200 mode = append_file ? "a" : "w";
1201 output = fdopen(output_fd, mode);
1202 if (!output) {
1203 perror("Failed opening logfd");
1204 return -errno;
1205 }
1206 }
1207
1208 if (csv_sep) {
1209 csv_output = true;
1210 if (!strcmp(csv_sep, "\\t"))
1211 csv_sep = "\t";
1212 } else
1213 csv_sep = DEFAULT_SEPARATOR;
1214
1215 /*
1216 * let the spreadsheet do the pretty-printing
1217 */
1218 if (csv_output) {
1219 /* User explicitly passed -B? */
1220 if (big_num_opt == 1) {
1221 fprintf(stderr, "-B option not supported with -x\n");
1222 usage_with_options(stat_usage, options);
1223 } else /* Nope, so disable big number formatting */
1224 big_num = false;
1225 } else if (big_num_opt == 0) /* User passed --no-big-num */
1226 big_num = false;
1227
1228 if (!argc && !perf_target__has_task(&target))
1229 usage_with_options(stat_usage, options);
1230 if (run_count <= 0)
1231 usage_with_options(stat_usage, options);
1232
1233 /* no_aggr, cgroup are for system-wide only */
1234 if ((no_aggr || nr_cgroups) && !perf_target__has_cpu(&target)) {
1235 fprintf(stderr, "cgroup and no-aggregation "
1236 "modes are only available in system-wide mode\n");
1237
1238 usage_with_options(stat_usage, options);
1239 }
1240
1241 if (add_default_attributes())
1242 goto out;
1243
1244 perf_target__validate(&target);
1245
1246 if (perf_evlist__create_maps(evsel_list, &target) < 0) {
1247 if (perf_target__has_task(&target))
1248 pr_err("Problems finding threads to monitor\n");
1249 if (perf_target__has_cpu(&target))
1250 perror("failed to parse CPUs map");
1251
1252 usage_with_options(stat_usage, options);
1253 return -1;
1254 }
1255
1256 list_for_each_entry(pos, &evsel_list->entries, node) {
1257 if (perf_evsel__alloc_stat_priv(pos) < 0 ||
1258 perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0)
1259 goto out_free_fd;
1260 }
1261
1262 /*
1263 * We don't want to block the signals - that would cause
1264 * child tasks to inherit that and Ctrl-C would not work.
1265 * What we want is for Ctrl-C to work in the exec()-ed
1266 * task, but being ignored by perf stat itself:
1267 */
1268 atexit(sig_atexit);
1269 signal(SIGINT, skip_signal);
1270 signal(SIGALRM, skip_signal);
1271 signal(SIGABRT, skip_signal);
1272
1273 status = 0;
1274 for (run_idx = 0; run_idx < run_count; run_idx++) {
1275 if (run_count != 1 && verbose)
1276 fprintf(output, "[ perf stat: executing run #%d ... ]\n",
1277 run_idx + 1);
1278
1279 if (sync_run)
1280 sync();
1281
1282 status = run_perf_stat(argc, argv);
1283 }
1284
1285 if (status != -1)
1286 print_stat(argc, argv);
1287 out_free_fd:
1288 list_for_each_entry(pos, &evsel_list->entries, node)
1289 perf_evsel__free_stat_priv(pos);
1290 perf_evlist__delete_maps(evsel_list);
1291 out:
1292 perf_evlist__delete(evsel_list);
1293 return status;
1294 }