/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counters summary
 * overview about any workload, CPU or specific PID.
 *
 * Sample output:
 *
 *   $ perf stat ./hackbench 10
 *
 *  Performance counter stats for './hackbench 10':
 *
 *       1708.761321 task-clock                #   11.037 CPUs utilized
 *            41,190 context-switches          #    0.024 M/sec
 *             6,735 CPU-migrations             #    0.004 M/sec
 *            17,318 page-faults               #    0.010 M/sec
 *     5,205,202,243 cycles                    #    3.046 GHz
 *     3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
 *     1,600,790,871 stalled-cycles-backend    #   30.75% backend cycles idle
 *     2,603,501,247 instructions              #    0.50  insns per cycle
 *                                             #    1.48  stalled cycles per insn
 *       484,357,498 branches                  #  283.455 M/sec
 *         6,388,934 branch-misses             #    1.32% of all branches
 *
 *        0.154822978  seconds time elapsed
 *
 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "perf.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/pmu.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/drv_configs.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/counts.h"
#include "util/group.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/string2.h"
#include "util/metricgroup.h"
#include "asm/bug.h"

#include <linux/time64.h>
#include <api/fs/fs.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <inttypes.h>
#include <locale.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>

#include "sane_ctype.h"
#define DEFAULT_SEPARATOR	" "
#define FREEZE_ON_SMI_PATH	"devices/cpu/freeze_on_smi"
static void print_counters(struct timespec *ts, int argc, const char **argv);
/* Default events used for perf stat -T */
static const char *transaction_attrs = {
	"task-clock,"
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/,"
	"cpu/el-start/,"
	"cpu/cycles-ct/"
	"}"
};
/* More limited version when the CPU does not have all events. */
static const char *transaction_limited_attrs = {
	"task-clock,"
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/"
	"}"
};
static const char *topdown_attrs[] = {
	"topdown-total-slots",
	"topdown-slots-retired",
	"topdown-recovery-bubbles",
	"topdown-fetch-bubbles",
	"topdown-slots-issued",
	NULL,
};
static const char *smi_cost_attrs = {
	"{"
	"msr/aperf/,"
	"msr/smi/,"
	"cycles"
	"}"
};
static struct perf_evlist	*evsel_list;

static struct target target = {
	.uid	= UINT_MAX,
};
#define METRIC_ONLY_LEN 20
static volatile pid_t		child_pid		= -1;
static int			detailed_run		= 0;
static bool			transaction_run;
static bool			topdown_run		= false;
static bool			smi_cost		= false;
static bool			smi_reset		= false;
static int			big_num_opt		= -1;
static bool			group			= false;
static const char		*pre_cmd		= NULL;
static const char		*post_cmd		= NULL;
static bool			sync_run		= false;
static bool			forever			= false;
static bool			force_metric_only	= false;
static struct timespec		ref_time;
static bool			append_file;
static bool			interval_count;
static const char		*output_name;
static int			output_fd;
struct perf_stat {
	bool			 record;
	struct perf_data	 data;
	struct perf_session	*session;
	u64			 bytes_written;
	struct perf_tool	 tool;
	bool			 maps_allocated;
	struct cpu_map		*cpus;
	struct thread_map	*threads;
	enum aggr_mode		 aggr_mode;
};

static struct perf_stat		perf_stat;
#define STAT_RECORD		perf_stat.record

static volatile int done = 0;
static struct perf_stat_config stat_config = {
	.aggr_mode		= AGGR_GLOBAL,
	.scale			= true,
	.unit_width		= 4, /* strlen("unit") */
	.run_count		= 1,
	.metric_only_len	= METRIC_ONLY_LEN,
	.walltime_nsecs_stats	= &walltime_nsecs_stats,
};
static inline void diff_timespec(struct timespec *r, struct timespec *a,
				 struct timespec *b)
{
	r->tv_sec = a->tv_sec - b->tv_sec;
	if (a->tv_nsec < b->tv_nsec) {
		r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
		r->tv_sec--;
	} else {
		r->tv_nsec = a->tv_nsec - b->tv_nsec;
	}
}
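
/*
 * Illustrative example (values made up): for a = { .tv_sec = 2,
 * .tv_nsec = 100 } and b = { .tv_sec = 1, .tv_nsec = 200 }, the
 * nanosecond field borrows one second from the difference, giving
 * r = { .tv_sec = 0, .tv_nsec = 999999900 }.
 */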
static void perf_stat__reset_stats(void)
{
	int i;

	perf_evlist__reset_stats(evsel_list);
	perf_stat__reset_shadow_stats();

	for (i = 0; i < stat_config.stats_num; i++)
		perf_stat__reset_shadow_per_stat(&stat_config.stats[i]);
}
static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	perf_stat.bytes_written += event->header.size;
	return 0;
}
static int write_stat_round_event(u64 tm, u64 type)
{
	return perf_event__synthesize_stat_round(NULL, tm, type,
						 process_synthesized_event,
						 NULL);
}
#define WRITE_STAT_ROUND_EVENT(time, interval) \
	write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)

#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
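
/*
 * Usage sketch (illustrative only): SID(counter, 1, 2) returns the
 * struct perf_sample_id stored in the counter's sample_id xyarray at CPU
 * index 1, thread index 2, i.e. the per-(cpu, thread) event ID that
 * perf_evsel__write_stat_event() below embeds in synthesized stat events.
 */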
static int
perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread,
			     struct perf_counts_values *count)
{
	struct perf_sample_id *sid = SID(counter, cpu, thread);

	return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
					   process_synthesized_event, NULL);
}
/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static int read_counter(struct perf_evsel *counter)
{
	int nthreads = thread_map__nr(evsel_list->threads);
	int ncpus, cpu, thread;

	if (target__has_cpu(&target) && !target__has_per_thread(&target))
		ncpus = perf_evsel__nr_cpus(counter);
	else
		ncpus = 1;

	if (!counter->supported)
		return -ENOENT;

	if (counter->system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			struct perf_counts_values *count;

			count = perf_counts(counter->counts, cpu, thread);

			/*
			 * The leader's group read loads data into its group members
			 * (via perf_evsel__read_counter) and sets their count->loaded.
			 */
			if (!count->loaded &&
			    perf_evsel__read_counter(counter, cpu, thread)) {
				counter->counts->scaled = -1;
				perf_counts(counter->counts, cpu, thread)->ena = 0;
				perf_counts(counter->counts, cpu, thread)->run = 0;
				return -1;
			}

			count->loaded = false;

			if (STAT_RECORD) {
				if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
					pr_err("failed to write stat event\n");
					return -1;
				}
			}

			if (verbose > 1) {
				fprintf(stat_config.output,
					"%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
					perf_evsel__name(counter),
					cpu,
					count->val, count->ena, count->run);
			}
		}
	}

	return 0;
}
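
/*
 * Note on the val/ena/run triple printed above: ena and run are the times
 * the event was enabled and actually running. When the kernel multiplexes
 * counters, run < ena, and the aggregation code estimates the full-period
 * count as roughly val * ena / run (see perf_counts_values__scale()).
 */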
static void read_counters(void)
{
	struct perf_evsel *counter;
	int ret;

	evlist__for_each_entry(evsel_list, counter) {
		ret = read_counter(counter);
		if (ret)
			pr_debug("failed to read counter %s\n", counter->name);

		if (ret == 0 && perf_stat_process_counter(&stat_config, counter))
			pr_warning("failed to process counter %s\n", counter->name);
	}
}
static void process_interval(void)
{
	struct timespec ts, rs;

	read_counters();

	clock_gettime(CLOCK_MONOTONIC, &ts);
	diff_timespec(&rs, &ts, &ref_time);

	if (STAT_RECORD) {
		if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
			pr_err("failed to write stat round event\n");
	}

	init_stats(&walltime_nsecs_stats);
	update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000);
	print_counters(&rs, 0, NULL);
}
static void enable_counters(void)
{
	if (stat_config.initial_delay)
		usleep(stat_config.initial_delay * USEC_PER_MSEC);

	/*
	 * We need to enable counters only if:
	 * - we don't have tracee (attaching to task or cpu)
	 * - we have initial delay configured
	 */
	if (!target__none(&target) || stat_config.initial_delay)
		perf_evlist__enable(evsel_list);
}
static void disable_counters(void)
{
	/*
	 * If we don't have tracee (attaching to task or cpu), counters may
	 * still be running. To get accurate group ratios, we must stop groups
	 * from counting before reading their constituent counters.
	 */
	if (!target__none(&target))
		perf_evlist__disable(evsel_list);
}
static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
}
static bool perf_evsel__should_store_id(struct perf_evsel *counter)
{
	return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID;
}
static struct perf_evsel *perf_evsel__reset_weak_group(struct perf_evsel *evsel)
{
	struct perf_evsel *c2, *leader;
	bool is_open = true;

	leader = evsel->leader;
	pr_debug("Weak group for %s/%d failed\n",
			leader->name, leader->nr_members);

	/*
	 * for_each_group_member doesn't work here because it doesn't
	 * include the first entry.
	 */
	evlist__for_each_entry(evsel_list, c2) {
		if (c2 == evsel)
			is_open = false;
		if (c2->leader == leader) {
			if (is_open)
				perf_evsel__close(c2);
			c2->leader = c2;
			c2->group_name = NULL;
		}
	}
	return leader;
}
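
/*
 * Weak groups are requested with the ':W' event modifier, e.g. (illustrative
 * command line):
 *
 *   perf stat -e '{cycles,instructions,branches}:W' -- ./workload
 *
 * If the kernel cannot schedule the group as a unit, the group is broken up
 * here and the events are retried as individual counters.
 */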
static int __run_perf_stat(int argc, const char **argv, int run_idx)
{
	int interval = stat_config.interval;
	int times = stat_config.times;
	int timeout = stat_config.timeout;
	char msg[BUFSIZ];
	unsigned long long t0, t1;
	struct perf_evsel *counter;
	struct timespec ts;
	size_t l;
	int status = 0;
	const bool forks = (argc > 0);
	bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
	struct perf_evsel_config_term *err_term;

	if (interval) {
		ts.tv_sec  = interval / USEC_PER_MSEC;
		ts.tv_nsec = (interval % USEC_PER_MSEC) * NSEC_PER_MSEC;
	} else if (timeout) {
		ts.tv_sec  = timeout / USEC_PER_MSEC;
		ts.tv_nsec = (timeout % USEC_PER_MSEC) * NSEC_PER_MSEC;
	} else {
		ts.tv_sec  = 1;
		ts.tv_nsec = 0;
	}
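
	/*
	 * Worked example (illustrative): with -I 2500 the interval is 2500ms,
	 * so ts.tv_sec = 2500 / 1000 = 2 and
	 * ts.tv_nsec = (2500 % 1000) * 1000000 = 500000000, i.e. a 2.5s
	 * sleep granularity for the polling loops below.
	 */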
	if (forks) {
		if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
						  workload_exec_failed_signal) < 0) {
			perror("failed to prepare workload");
			return -1;
		}
		child_pid = evsel_list->workload.pid;
	}

	if (group)
		perf_evlist__set_leader(evsel_list);
	evlist__for_each_entry(evsel_list, counter) {
try_again:
		if (create_perf_stat_counter(counter, &stat_config, &target) < 0) {

			/* Weak group failed. Reset the group. */
			if ((errno == EINVAL || errno == EBADF) &&
			    counter->leader != counter &&
			    counter->weak_group) {
				counter = perf_evsel__reset_weak_group(counter);
				goto try_again;
			}

			/*
			 * PPC returns ENXIO for HW counters until 2.6.37
			 * (behavior changed with commit b0a873e).
			 */
			if (errno == EINVAL || errno == ENOSYS ||
			    errno == ENOENT || errno == EOPNOTSUPP ||
			    errno == ENXIO) {
				if (verbose > 0)
					ui__warning("%s event is not supported by the kernel.\n",
						    perf_evsel__name(counter));
				counter->supported = false;

				if ((counter->leader != counter) ||
				    !(counter->leader->nr_members > 1))
					continue;
			} else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			} else if (target__has_per_thread(&target) &&
				   evsel_list->threads &&
				   evsel_list->threads->err_thread != -1) {
				/*
				 * For global --per-thread case, skip current
				 * error thread.
				 */
				if (!thread_map__remove(evsel_list->threads,
							evsel_list->threads->err_thread)) {
					evsel_list->threads->err_thread = -1;
					goto try_again;
				}
			}

			perf_evsel__open_strerror(counter, &target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);

			if (child_pid != -1)
				kill(child_pid, SIGTERM);

			return -1;
		}
		counter->supported = true;

		l = strlen(counter->unit);
		if (l > stat_config.unit_width)
			stat_config.unit_width = l;

		if (perf_evsel__should_store_id(counter) &&
		    perf_evsel__store_ids(counter, evsel_list))
			return -1;
	}
	if (perf_evlist__apply_filters(evsel_list, &counter)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			counter->filter, perf_evsel__name(counter), errno,
			str_error_r(errno, msg, sizeof(msg)));
		return -1;
	}

	if (perf_evlist__apply_drv_configs(evsel_list, &counter, &err_term)) {
		pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
			err_term->val.drv_cfg, perf_evsel__name(counter), errno,
			str_error_r(errno, msg, sizeof(msg)));
		return -1;
	}
	if (STAT_RECORD) {
		int err, fd = perf_data__fd(&perf_stat.data);

		if (is_pipe) {
			err = perf_header__write_pipe(perf_data__fd(&perf_stat.data));
		} else {
			err = perf_session__write_header(perf_stat.session, evsel_list,
							 fd, false);
		}

		if (err < 0)
			return err;

		err = perf_stat_synthesize_config(&stat_config, NULL, evsel_list,
						  process_synthesized_event, is_pipe);
		if (err < 0)
			return err;
	}
	/*
	 * Enable counters and exec the command:
	 */
	t0 = rdclock();
	clock_gettime(CLOCK_MONOTONIC, &ref_time);

	if (forks) {
		perf_evlist__start_workload(evsel_list);
		enable_counters();

		if (interval || timeout) {
			while (!waitpid(child_pid, &status, WNOHANG)) {
				nanosleep(&ts, NULL);
				if (timeout)
					break;
				process_interval();
				if (interval_count && !(--times))
					break;
			}
		}
		wait4(child_pid, &status, 0, &stat_config.ru_data);

		if (workload_exec_errno) {
			const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
			pr_err("Workload failed: %s\n", emsg);
			return -1;
		}

		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), argv[0]);
	} else {
		enable_counters();
		while (!done) {
			nanosleep(&ts, NULL);
			if (timeout)
				break;
			if (interval) {
				process_interval();
				if (interval_count && !(--times))
					break;
			}
		}
	}
	disable_counters();

	t1 = rdclock();

	if (stat_config.walltime_run_table)
		stat_config.walltime_run[run_idx] = t1 - t0;

	update_stats(&walltime_nsecs_stats, t1 - t0);

	/*
	 * Closing a group leader splits the group, and as we only disable
	 * group leaders, results in remaining events becoming enabled. To
	 * avoid arbitrary skew, we must read all counters before closing any
	 * group leaders.
	 */
	read_counters();
	perf_evlist__close(evsel_list);

	return WEXITSTATUS(status);
}
static int run_perf_stat(int argc, const char **argv, int run_idx)
{
	int ret;

	if (pre_cmd) {
		ret = system(pre_cmd);
		if (ret)
			return ret;
	}

	if (sync_run)
		sync();

	ret = __run_perf_stat(argc, argv, run_idx);
	if (ret)
		return ret;

	if (post_cmd) {
		ret = system(post_cmd);
		if (ret)
			return ret;
	}

	return ret;
}
static void print_counters(struct timespec *ts, int argc, const char **argv)
{
	/* Do not print anything if we record to the pipe. */
	if (STAT_RECORD && perf_stat.data.is_pipe)
		return;

	perf_evlist__print_counters(evsel_list, &stat_config, &target,
				    ts, argc, argv);
}
static volatile int signr = -1;

static void skip_signal(int signo)
{
	if ((child_pid == -1) || stat_config.interval)
		done = 1;

	signr = signo;
	/*
	 * render child_pid harmless
	 * won't send SIGTERM to a random
	 * process in case of race condition
	 * and fast PID recycling
	 */
	child_pid = -1;
}

static void sig_atexit(void)
{
	sigset_t set, oset;

	/*
	 * avoid race condition with SIGCHLD handler
	 * in skip_signal() which is modifying child_pid
	 * goal is to avoid sending SIGTERM to a random
	 * process
	 */
	sigemptyset(&set);
	sigaddset(&set, SIGCHLD);
	sigprocmask(SIG_BLOCK, &set, &oset);

	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	sigprocmask(SIG_SETMASK, &oset, NULL);

	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}
static int stat__set_big_num(const struct option *opt __maybe_unused,
			     const char *s __maybe_unused, int unset)
{
	big_num_opt = unset ? 0 : 1;
	return 0;
}

static int enable_metric_only(const struct option *opt __maybe_unused,
			      const char *s __maybe_unused, int unset)
{
	force_metric_only = true;
	stat_config.metric_only = !unset;
	return 0;
}

static int parse_metric_groups(const struct option *opt,
			       const char *str,
			       int unset __maybe_unused)
{
	return metricgroup__parse_groups(opt, str, &stat_config.metric_events);
}
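
/*
 * Illustrative use of the -M callback above (metric and metric group names
 * depend on the CPU's json metrics, so treat these as examples only):
 *
 *   perf stat -M TopDownL1 -- ./workload
 *   perf stat -M IPC -- ./workload
 */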
static const struct option stat_options[] = {
	OPT_BOOLEAN('T', "transaction", &transaction_run,
		    "hardware transaction statistics"),
	OPT_CALLBACK('e', "event", &evsel_list, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &evsel_list, "filter",
		     "event filter", parse_filter),
	OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING('p', "pid", &target.pid, "pid",
		   "stat events on existing process id"),
	OPT_STRING('t', "tid", &target.tid, "tid",
		   "stat events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('g', "group", &group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &stat_config.run_count,
		    "repeat command and print average + stddev (max: 100, forever: 0)"),
	OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
		    "display details about each run (only with -r option)"),
	OPT_BOOLEAN('n', "null", &stat_config.null_run,
		    "null run - don't start any counters"),
	OPT_INCR('d', "detailed", &detailed_run,
		 "detailed run - start a lot of events"),
	OPT_BOOLEAN('S', "sync", &sync_run,
		    "call sync() before starting a run"),
	OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
			   "print large numbers with thousands' separators",
			   stat__set_big_num),
	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
		   "list of cpus to monitor in system-wide"),
	OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
		     "disable CPU count aggregation", AGGR_NONE),
	OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
	OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
		   "print counts with custom separator"),
	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
		     "monitor event in cgroup name only", parse_cgroups),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
	OPT_INTEGER(0, "log-fd", &output_fd,
		    "log output to fd, instead of stderr"),
	OPT_STRING(0, "pre", &pre_cmd, "command",
		   "command to run prior to the measured command"),
	OPT_STRING(0, "post", &post_cmd, "command",
		   "command to run after the measured command"),
	OPT_UINTEGER('I', "interval-print", &stat_config.interval,
		     "print counts at regular interval in ms "
		     "(overhead is possible for values <= 100ms)"),
	OPT_INTEGER(0, "interval-count", &stat_config.times,
		    "print counts for fixed number of times"),
	OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
		    "clear screen in between new interval"),
	OPT_UINTEGER(0, "timeout", &stat_config.timeout,
		     "stop workload and print counts after a timeout period in ms (>= 10ms)"),
	OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
		     "aggregate counts per processor socket", AGGR_SOCKET),
	OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
		     "aggregate counts per physical processor core", AGGR_CORE),
	OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
		     "aggregate counts per thread", AGGR_THREAD),
	OPT_UINTEGER('D', "delay", &stat_config.initial_delay,
		     "ms to wait before starting measurement after program start"),
	OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
			   "Only print computed metrics. No raw values", enable_metric_only),
	OPT_BOOLEAN(0, "topdown", &topdown_run,
		    "measure topdown level 1 statistics"),
	OPT_BOOLEAN(0, "smi-cost", &smi_cost,
		    "measure SMI cost"),
	OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
		     "monitor specified metrics or metric groups (separated by ,)",
		     parse_metric_groups),
	OPT_END()
};
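
/*
 * A few illustrative invocations of the options above (the event names are
 * examples, not an exhaustive list):
 *
 *   perf stat -e cycles,instructions -- ./workload     # explicit events
 *   perf stat -a -A sleep 1                            # all CPUs, no aggregation
 *   perf stat -I 1000 --interval-count 5 -p 1234       # 5 one-second intervals
 *   perf stat -r 10 --table -- ./workload              # 10 runs plus per-run table
 */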
static int perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
				 struct cpu_map *map, int cpu)
{
	return cpu_map__get_socket(map, cpu, NULL);
}

static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
			       struct cpu_map *map, int cpu)
{
	return cpu_map__get_core(map, cpu, NULL);
}
static int cpu_map__get_max(struct cpu_map *map)
{
	int i, max = -1;

	for (i = 0; i < map->nr; i++) {
		if (map->map[i] > max)
			max = map->map[i];
	}

	return max;
}
static int perf_stat__get_aggr(struct perf_stat_config *config,
			       aggr_get_id_t get_id, struct cpu_map *map, int idx)
{
	int cpu;

	if (idx >= map->nr)
		return -1;

	cpu = map->map[idx];

	if (config->cpus_aggr_map->map[cpu] == -1)
		config->cpus_aggr_map->map[cpu] = get_id(config, map, idx);

	return config->cpus_aggr_map->map[cpu];
}
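
/*
 * Illustrative behavior of the cache above: with --per-socket on a box where
 * CPUs 0-3 sit on socket 0 and CPUs 4-7 on socket 1, the first lookup for
 * CPU 5 calls get_id() and stores 1 in cpus_aggr_map->map[5]; every later
 * lookup for CPU 5 returns the cached value without re-reading the topology.
 */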
static int perf_stat__get_socket_cached(struct perf_stat_config *config,
					struct cpu_map *map, int idx)
{
	return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx);
}

static int perf_stat__get_core_cached(struct perf_stat_config *config,
				      struct cpu_map *map, int idx)
{
	return perf_stat__get_aggr(config, perf_stat__get_core, map, idx);
}
static int perf_stat_init_aggr_mode(void)
{
	int nr;

	switch (stat_config.aggr_mode) {
	case AGGR_SOCKET:
		if (cpu_map__build_socket_map(evsel_list->cpus, &stat_config.aggr_map)) {
			perror("cannot build socket map");
			return -1;
		}
		stat_config.aggr_get_id = perf_stat__get_socket_cached;
		break;
	case AGGR_CORE:
		if (cpu_map__build_core_map(evsel_list->cpus, &stat_config.aggr_map)) {
			perror("cannot build core map");
			return -1;
		}
		stat_config.aggr_get_id = perf_stat__get_core_cached;
		break;
	case AGGR_NONE:
	case AGGR_GLOBAL:
	case AGGR_THREAD:
	case AGGR_UNSET:
	default:
		break;
	}

	/*
	 * The evsel_list->cpus is the base we operate on,
	 * taking the highest cpu number to be the size of
	 * the aggregation translate cpumap.
	 */
	nr = cpu_map__get_max(evsel_list->cpus);
	stat_config.cpus_aggr_map = cpu_map__empty_new(nr + 1);
	return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
}
static void perf_stat__exit_aggr_mode(void)
{
	cpu_map__put(stat_config.aggr_map);
	cpu_map__put(stat_config.cpus_aggr_map);
	stat_config.aggr_map = NULL;
	stat_config.cpus_aggr_map = NULL;
}
static inline int perf_env__get_cpu(struct perf_env *env, struct cpu_map *map, int idx)
{
	int cpu;

	if (idx > map->nr)
		return -1;

	cpu = map->map[idx];

	if (cpu >= env->nr_cpus_avail)
		return -1;

	return cpu;
}
*map
, int idx
, void *data
)
908 struct perf_env
*env
= data
;
909 int cpu
= perf_env__get_cpu(env
, map
, idx
);
911 return cpu
== -1 ? -1 : env
->cpu
[cpu
].socket_id
;
static int perf_env__get_core(struct cpu_map *map, int idx, void *data)
{
	struct perf_env *env = data;
	int core = -1, cpu = perf_env__get_cpu(env, map, idx);

	if (cpu != -1) {
		int socket_id = env->cpu[cpu].socket_id;

		/*
		 * Encode socket in upper 16 bits
		 * core_id is relative to socket, and
		 * we need a global id. So we combine
		 * socket + core id.
		 */
		core = (socket_id << 16) | (env->cpu[cpu].core_id & 0xffff);
	}

	return core;
}
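
/*
 * Worked example of the encoding above: socket 1, per-socket core 2 becomes
 * (1 << 16) | 2 == 0x10002. The socket can be recovered as (core >> 16) and
 * the per-socket core id as (core & 0xffff).
 */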
static int perf_env__build_socket_map(struct perf_env *env, struct cpu_map *cpus,
				      struct cpu_map **sockp)
{
	return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env);
}

static int perf_env__build_core_map(struct perf_env *env, struct cpu_map *cpus,
				    struct cpu_map **corep)
{
	return cpu_map__build_map(cpus, corep, perf_env__get_core, env);
}
static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
				      struct cpu_map *map, int idx)
{
	return perf_env__get_socket(map, idx, &perf_stat.session->header.env);
}

static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
				    struct cpu_map *map, int idx)
{
	return perf_env__get_core(map, idx, &perf_stat.session->header.env);
}
static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
{
	struct perf_env *env = &st->session->header.env;

	switch (stat_config.aggr_mode) {
	case AGGR_SOCKET:
		if (perf_env__build_socket_map(env, evsel_list->cpus, &stat_config.aggr_map)) {
			perror("cannot build socket map");
			return -1;
		}
		stat_config.aggr_get_id = perf_stat__get_socket_file;
		break;
	case AGGR_CORE:
		if (perf_env__build_core_map(env, evsel_list->cpus, &stat_config.aggr_map)) {
			perror("cannot build core map");
			return -1;
		}
		stat_config.aggr_get_id = perf_stat__get_core_file;
		break;
	case AGGR_NONE:
	case AGGR_GLOBAL:
	case AGGR_THREAD:
	case AGGR_UNSET:
	default:
		break;
	}

	return 0;
}
static int topdown_filter_events(const char **attr, char **str, bool use_group)
{
	int off = 0;
	int i;
	int len = 0;
	char *s;

	for (i = 0; attr[i]; i++) {
		if (pmu_have_event("cpu", attr[i])) {
			len += strlen(attr[i]) + 1;
			attr[i - off] = attr[i];
		} else {
			off++;
		}
	}
	attr[i - off] = NULL;

	*str = malloc(len + 1 + 2);
	if (!*str)
		return -1;
	s = *str;
	if (i - off == 0) {
		*s = 0;
		return 0;
	}
	if (use_group)
		*s++ = '{';
	for (i = 0; attr[i]; i++) {
		strcpy(s, attr[i]);
		s += strlen(s);
		*s++ = ',';
	}
	if (use_group) {
		s[-1] = '}';
		*s = 0;
	} else {
		s[-1] = 0;
	}
	return 0;
}
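
/*
 * Example result (illustrative): on a CPU whose "cpu" PMU exposes all five
 * topdown events and with use_group == true, *str ends up as
 * "{topdown-total-slots,topdown-slots-retired,topdown-recovery-bubbles,"
 * "topdown-fetch-bubbles,topdown-slots-issued}", which is then fed to
 * parse_events() like any -e argument.
 */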
__weak bool arch_topdown_check_group(bool *warn)
{
	*warn = false;
	return false;
}

__weak void arch_topdown_group_warn(void)
{
}
/*
 * Add default attributes, if there were no attributes specified or
 * if -d/--detailed, -d -d or -d -d -d is used:
 */
static int add_default_attributes(void)
{
	int err;
	struct perf_event_attr default_attrs0[] = {

  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK		},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES	},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS		},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS		},

  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES		},
};
	struct perf_event_attr frontend_attrs[] = {
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
};
	struct perf_event_attr backend_attrs[] = {
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND	},
};
	struct perf_event_attr default_attrs1[] = {
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS		},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES		},
};

/*
 * Detailed stats (-d), covering the L1 and last level data caches:
 */
	struct perf_event_attr detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_LL			<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_LL			<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},
};

/*
 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
 */
	struct perf_event_attr very_detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1I		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1I		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_DTLB		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_DTLB		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_ITLB		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_ITLB		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},
};

/*
 * Very, very detailed stats (-d -d -d), adding prefetch events:
 */
	struct perf_event_attr very_very_detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_PREFETCH	<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_PREFETCH	<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},
};
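
/*
 * The HW cache config encoding above packs (cache id | op << 8 | result << 16)
 * into attr.config. Worked example: an L1D read miss is
 * PERF_COUNT_HW_CACHE_L1D (0) | PERF_COUNT_HW_CACHE_OP_READ (0) << 8 |
 * PERF_COUNT_HW_CACHE_RESULT_MISS (1) << 16 == 0x10000.
 */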
	struct parse_events_error errinfo;

	/* Set attrs if no event is selected and !null_run: */
	if (stat_config.null_run)
		return 0;

	if (transaction_run) {
		/* Handle -T as -M transaction. Once platform specific metrics
		 * support has been added to the json files, all architectures
		 * will use this approach. To determine transaction support
		 * on an architecture test for such a metric name.
		 */
		if (metricgroup__has_metric("transaction")) {
			struct option opt = { .value = &evsel_list };

			return metricgroup__parse_groups(&opt, "transaction",
							 &stat_config.metric_events);
		}

		if (pmu_have_event("cpu", "cycles-ct") &&
		    pmu_have_event("cpu", "el-start"))
			err = parse_events(evsel_list, transaction_attrs,
					   &errinfo);
		else
			err = parse_events(evsel_list,
					   transaction_limited_attrs,
					   &errinfo);
		if (err) {
			fprintf(stderr, "Cannot set up transaction events\n");
			parse_events_print_error(&errinfo, transaction_attrs);
			return -1;
		}
		return 0;
	}

	if (smi_cost) {
		int smi;

		if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
			fprintf(stderr, "freeze_on_smi is not supported.\n");
			return -1;
		}

		if (!smi) {
			if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
				fprintf(stderr, "Failed to set freeze_on_smi.\n");
				return -1;
			}
			smi_reset = true;
		}

		if (pmu_have_event("msr", "aperf") &&
		    pmu_have_event("msr", "smi")) {
			if (!force_metric_only)
				stat_config.metric_only = true;
			err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
		} else {
			fprintf(stderr, "To measure SMI cost, it needs "
				"msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
			return -1;
		}
		if (err) {
			parse_events_print_error(&errinfo, smi_cost_attrs);
			fprintf(stderr, "Cannot set up SMI cost events\n");
			return -1;
		}
		return 0;
	}

	if (topdown_run) {
		char *str = NULL;
		bool warn = false;

		if (stat_config.aggr_mode != AGGR_GLOBAL &&
		    stat_config.aggr_mode != AGGR_CORE) {
			pr_err("top down event configuration requires --per-core mode\n");
			return -1;
		}
		stat_config.aggr_mode = AGGR_CORE;
		if (nr_cgroups || !target__has_cpu(&target)) {
			pr_err("top down event configuration requires system-wide mode (-a)\n");
			return -1;
		}

		if (!force_metric_only)
			stat_config.metric_only = true;
		if (topdown_filter_events(topdown_attrs, &str,
				arch_topdown_check_group(&warn)) < 0) {
			pr_err("Out of memory\n");
			return -1;
		}
		if (topdown_attrs[0] && str) {
			if (warn)
				arch_topdown_group_warn();
			err = parse_events(evsel_list, str, &errinfo);
			if (err) {
				fprintf(stderr,
					"Cannot set up top down events %s: %d\n",
					str, err);
				parse_events_print_error(&errinfo, str);
				free(str);
				return -1;
			}
		} else {
			fprintf(stderr, "System does not support topdown\n");
			return -1;
		}
		free(str);
	}

	if (!evsel_list->nr_entries) {
		if (target__has_cpu(&target))
			default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;

		if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
			return -1;
		if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
			if (perf_evlist__add_default_attrs(evsel_list,
						frontend_attrs) < 0)
				return -1;
		}
		if (pmu_have_event("cpu", "stalled-cycles-backend")) {
			if (perf_evlist__add_default_attrs(evsel_list,
						backend_attrs) < 0)
				return -1;
		}
		if (perf_evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
			return -1;
	}

	/* Detailed events get appended to the event list: */

	if (detailed_run < 1)
		return 0;

	/* Append detailed run extra attributes: */
	if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
		return -1;

	if (detailed_run < 2)
		return 0;

	/* Append very detailed run extra attributes: */
	if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
		return -1;

	if (detailed_run < 3)
		return 0;

	/* Append very, very detailed run extra attributes: */
	return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
}
static const char * const stat_record_usage[] = {
	"perf stat record [<options>]",
	NULL,
};
static void init_features(struct perf_session *session)
{
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
	perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
	perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
	perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
}
static int __cmd_record(int argc, const char **argv)
{
	struct perf_session *session;
	struct perf_data *data = &perf_stat.data;

	argc = parse_options(argc, argv, stat_options, stat_record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);

	if (output_name)
		data->file.path = output_name;

	if (stat_config.run_count != 1 || forever) {
		pr_err("Cannot use -r option with perf stat record.\n");
		return -1;
	}

	session = perf_session__new(data, false, NULL);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	init_features(session);

	session->evlist   = evsel_list;
	perf_stat.session = session;
	perf_stat.record  = true;
	return argc;
}
static int process_stat_round_event(struct perf_session *session,
				    union perf_event *event)
{
	struct stat_round_event *stat_round = &event->stat_round;
	struct perf_evsel *counter;
	struct timespec tsh, *ts = NULL;
	const char **argv = session->header.env.cmdline_argv;
	int argc = session->header.env.nr_cmdline;

	evlist__for_each_entry(evsel_list, counter)
		perf_stat_process_counter(&stat_config, counter);

	if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
		update_stats(&walltime_nsecs_stats, stat_round->time);

	if (stat_config.interval && stat_round->time) {
		tsh.tv_sec  = stat_round->time / NSEC_PER_SEC;
		tsh.tv_nsec = stat_round->time % NSEC_PER_SEC;
		ts = &tsh;
	}

	print_counters(ts, argc, argv);
	return 0;
}
static
int process_stat_config_event(struct perf_session *session,
			      union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);

	perf_event__read_stat_config(&stat_config, &event->stat_config);

	if (cpu_map__empty(st->cpus)) {
		if (st->aggr_mode != AGGR_UNSET)
			pr_warning("warning: processing task data, aggregation mode not set\n");
		return 0;
	}

	if (st->aggr_mode != AGGR_UNSET)
		stat_config.aggr_mode = st->aggr_mode;

	if (perf_stat.data.is_pipe)
		perf_stat_init_aggr_mode();
	else
		perf_stat_init_aggr_mode_file(st);

	return 0;
}
static int set_maps(struct perf_stat *st)
{
	if (!st->cpus || !st->threads)
		return 0;

	if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
		return -EINVAL;

	perf_evlist__set_maps(evsel_list, st->cpus, st->threads);

	if (perf_evlist__alloc_stats(evsel_list, true))
		return -ENOMEM;

	st->maps_allocated = true;
	return 0;
}
static
int process_thread_map_event(struct perf_session *session,
			     union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);

	if (st->threads) {
		pr_warning("Extra thread map event, ignoring.\n");
		return 0;
	}

	st->threads = thread_map__new_event(&event->thread_map);
	if (!st->threads)
		return -ENOMEM;

	return set_maps(st);
}
static
int process_cpu_map_event(struct perf_session *session,
			  union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);
	struct cpu_map *cpus;

	if (st->cpus) {
		pr_warning("Extra cpu map event, ignoring.\n");
		return 0;
	}

	cpus = cpu_map__new_data(&event->cpu_map.data);
	if (!cpus)
		return -ENOMEM;

	st->cpus = cpus;
	return set_maps(st);
}
static int runtime_stat_new(struct perf_stat_config *config, int nthreads)
{
	int i;

	config->stats = calloc(nthreads, sizeof(struct runtime_stat));
	if (!config->stats)
		return -1;

	config->stats_num = nthreads;

	for (i = 0; i < nthreads; i++)
		runtime_stat__init(&config->stats[i]);

	return 0;
}

static void runtime_stat_delete(struct perf_stat_config *config)
{
	int i;

	if (!config->stats)
		return;

	for (i = 0; i < config->stats_num; i++)
		runtime_stat__exit(&config->stats[i]);

	free(config->stats);
}
static const char * const stat_report_usage[] = {
	"perf stat report [<options>]",
	NULL,
};
static struct perf_stat perf_stat = {
	.tool = {
		.attr		= perf_event__process_attr,
		.event_update	= perf_event__process_event_update,
		.thread_map	= process_thread_map_event,
		.cpu_map	= process_cpu_map_event,
		.stat_config	= process_stat_config_event,
		.stat		= perf_event__process_stat_event,
		.stat_round	= process_stat_round_event,
	},
	.aggr_mode = AGGR_UNSET,
};
static int __cmd_report(int argc, const char **argv)
{
	struct perf_session *session;
	const struct option options[] = {
		OPT_STRING('i', "input", &input_name, "file", "input file name"),
		OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
			     "aggregate counts per processor socket", AGGR_SOCKET),
		OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
			     "aggregate counts per physical processor core", AGGR_CORE),
		OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
			     "disable CPU count aggregation", AGGR_NONE),
		OPT_END()
	};
	struct stat st;
	int ret;

	argc = parse_options(argc, argv, options, stat_report_usage, 0);

	if (!input_name || !strlen(input_name)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			input_name = "-";
		else
			input_name = "perf.data";
	}

	perf_stat.data.file.path = input_name;
	perf_stat.data.mode      = PERF_DATA_MODE_READ;

	session = perf_session__new(&perf_stat.data, false, &perf_stat.tool);
	if (session == NULL)
		return -1;

	perf_stat.session  = session;
	stat_config.output = stderr;
	evsel_list         = session->evlist;

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	perf_session__delete(session);
	return 0;
}
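
/*
 * Record/report round trip (illustrative):
 *
 *   perf stat record -o stat.data -- sleep 1
 *   perf stat report -i stat.data
 *
 * replays the counter values saved in stat.data through the same printing
 * code used for live runs.
 */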
static void setup_system_wide(int forks)
{
	/*
	 * Make system wide (-a) the default target if
	 * no target was specified and one of following
	 * conditions is met:
	 *
	 *   - there's no workload specified
	 *   - there is workload specified but all requested
	 *     events are system wide events
	 */
	if (!target__none(&target))
		return;

	if (!forks)
		target.system_wide = true;
	else {
		struct perf_evsel *counter;

		evlist__for_each_entry(evsel_list, counter) {
			if (!counter->system_wide)
				return;
		}

		if (evsel_list->nr_entries)
			target.system_wide = true;
	}
}
int cmd_stat(int argc, const char **argv)
{
	const char * const stat_usage[] = {
		"perf stat [<options>] [<command>]",
		NULL
	};
	int status = -EINVAL, run_idx;
	const char *mode;
	FILE *output = stderr;
	unsigned int interval, timeout;
	const char * const stat_subcommands[] = { "record", "report" };

	setlocale(LC_ALL, "");

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	parse_events__shrink_config_terms();
	argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
					(const char **) stat_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);
	perf_stat__collect_metric_expr(evsel_list);
	perf_stat__init_shadow_stats();

	if (stat_config.csv_sep) {
		stat_config.csv_output = true;
		if (!strcmp(stat_config.csv_sep, "\\t"))
			stat_config.csv_sep = "\t";
	} else
		stat_config.csv_sep = DEFAULT_SEPARATOR;

	if (argc && !strncmp(argv[0], "rec", 3)) {
		argc = __cmd_record(argc, argv);
		if (argc < 0)
			return -1;
	} else if (argc && !strncmp(argv[0], "rep", 3))
		return __cmd_report(argc, argv);
	interval = stat_config.interval;
	timeout = stat_config.timeout;

	/*
	 * For record command the -o is already taken care of.
	 */
	if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
		output = NULL;

	if (output_name && output_fd) {
		fprintf(stderr, "cannot use both --output and --log-fd\n");
		parse_options_usage(stat_usage, stat_options, "o", 1);
		parse_options_usage(NULL, stat_options, "log-fd", 0);
		goto out;
	}

	if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) {
		fprintf(stderr, "--metric-only is not supported with --per-thread\n");
		goto out;
	}

	if (stat_config.metric_only && stat_config.run_count > 1) {
		fprintf(stderr, "--metric-only is not supported with -r\n");
		goto out;
	}

	if (stat_config.walltime_run_table && stat_config.run_count <= 1) {
		fprintf(stderr, "--table is only supported with -r\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		parse_options_usage(NULL, stat_options, "table", 0);
		goto out;
	}

	if (output_fd < 0) {
		fprintf(stderr, "argument to --log-fd must be > 0\n");
		parse_options_usage(stat_usage, stat_options, "log-fd", 0);
		goto out;
	}

	if (!output) {
		struct timespec tm;

		mode = append_file ? "a" : "w";

		output = fopen(output_name, mode);
		if (!output) {
			perror("failed to create output file");
			return -1;
		}
		clock_gettime(CLOCK_REALTIME, &tm);
		fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
	} else if (output_fd > 0) {
		mode = append_file ? "a" : "w";
		output = fdopen(output_fd, mode);
		if (!output) {
			perror("Failed opening logfd");
			return -errno;
		}
	}

	stat_config.output = output;
	/*
	 * let the spreadsheet do the pretty-printing
	 */
	if (stat_config.csv_output) {
		/* User explicitly passed -B? */
		if (big_num_opt == 1) {
			fprintf(stderr, "-B option not supported with -x\n");
			parse_options_usage(stat_usage, stat_options, "B", 1);
			parse_options_usage(NULL, stat_options, "x", 1);
			goto out;
		} else /* Nope, so disable big number formatting */
			stat_config.big_num = false;
	} else if (big_num_opt == 0) /* User passed --no-big-num */
		stat_config.big_num = false;
	setup_system_wide(argc);

	/*
	 * Display user/system times only for single
	 * run and when there's specified tracee.
	 */
	if ((stat_config.run_count == 1) && target__none(&target))
		stat_config.ru_display = true;

	if (stat_config.run_count < 0) {
		pr_err("Run count must be a positive number\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		goto out;
	} else if (stat_config.run_count == 0) {
		forever = true;
		stat_config.run_count = 1;
	}

	if (stat_config.walltime_run_table) {
		stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0]));
		if (!stat_config.walltime_run) {
			pr_err("failed to setup -r option");
			goto out;
		}
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) &&
		!target__has_task(&target)) {
		if (!target.system_wide || target.cpu_list) {
			fprintf(stderr, "The --per-thread option is only "
				"available when monitoring via -p -t -a "
				"options or only --per-thread.\n");
			parse_options_usage(NULL, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
			goto out;
		}
	}

	/*
	 * no_aggr, cgroup are for system-wide only
	 * --per-thread is aggregated per thread, we don't mix it with cpu mode
	 */
	if (((stat_config.aggr_mode != AGGR_GLOBAL &&
	      stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) &&
	    !target__has_cpu(&target)) {
		fprintf(stderr, "both cgroup and no-aggregation "
			"modes only available in system-wide mode\n");

		parse_options_usage(stat_usage, stat_options, "G", 1);
		parse_options_usage(NULL, stat_options, "A", 1);
		parse_options_usage(NULL, stat_options, "a", 1);
		goto out;
	}

	if (add_default_attributes())
		goto out;
	target__validate(&target);

	if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
		target.per_thread = true;

	if (perf_evlist__create_maps(evsel_list, &target) < 0) {
		if (target__has_task(&target)) {
			pr_err("Problems finding threads of monitor\n");
			parse_options_usage(stat_usage, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
		} else if (target__has_cpu(&target)) {
			perror("failed to parse CPUs map");
			parse_options_usage(stat_usage, stat_options, "C", 1);
			parse_options_usage(NULL, stat_options, "a", 1);
		}
		goto out;
	}

	/*
	 * Initialize thread_map with comm names,
	 * so we could print it out on output.
	 */
	if (stat_config.aggr_mode == AGGR_THREAD) {
		thread_map__read_comms(evsel_list->threads);
		if (target.system_wide) {
			if (runtime_stat_new(&stat_config,
					     thread_map__nr(evsel_list->threads))) {
				goto out;
			}
		}
	}

	if (stat_config.times && interval)
		interval_count = true;
	else if (stat_config.times && !interval) {
		pr_err("interval-count option should be used together with "
				"interval-print.\n");
		parse_options_usage(stat_usage, stat_options, "interval-count", 0);
		parse_options_usage(stat_usage, stat_options, "I", 1);
		goto out;
	}

	if (timeout && timeout < 100) {
		if (timeout < 10) {
			pr_err("timeout must be >= 10ms.\n");
			parse_options_usage(stat_usage, stat_options, "timeout", 0);
			goto out;
		} else
			pr_warning("timeout < 100ms. "
				   "The overhead percentage could be high in some cases. "
				   "Please proceed with caution.\n");
	}
	if (timeout && interval) {
		pr_err("timeout option is not supported with interval-print.\n");
		parse_options_usage(stat_usage, stat_options, "timeout", 0);
		parse_options_usage(stat_usage, stat_options, "I", 1);
		goto out;
	}

	if (perf_evlist__alloc_stats(evsel_list, interval))
		goto out;
	if (perf_stat_init_aggr_mode())
		goto out;

	/*
	 * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
	 * while avoiding that older tools show confusing messages.
	 *
	 * However for pipe sessions we need to keep it zero,
	 * because script's perf_evsel__check_attr is triggered
	 * by attr->sample_type != 0, and we can't run it on
	 * stat sessions.
	 */
	stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe);

	/*
	 * We don't want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	if (!forever)
		signal(SIGINT,  skip_signal);
	signal(SIGCHLD, skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

	status = 0;
	for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
		if (stat_config.run_count != 1 && verbose > 0)
			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
				run_idx + 1);

		status = run_perf_stat(argc, argv, run_idx);
		if (forever && status != -1) {
			print_counters(NULL, argc, argv);
			perf_stat__reset_stats();
		}
	}

	if (!forever && status != -1 && !interval)
		print_counters(NULL, argc, argv);
	if (STAT_RECORD) {
		/*
		 * We synthesize the kernel mmap record just so that older tools
		 * don't emit warnings about not being able to resolve symbols
		 * due to /proc/sys/kernel/kptr_restrict settings and instead provide
		 * a saner message about no samples being in the perf.data file.
		 *
		 * This also serves to suppress a warning about f_header.data.size == 0
		 * in header.c at the moment 'perf stat record' gets introduced, which
		 * is not really needed once we start adding the stat specific PERF_RECORD_
		 * records, but the need to suppress the kptr_restrict messages in older
		 * tools remain  -acme
		 */
		int fd = perf_data__fd(&perf_stat.data);
		int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
							     process_synthesized_event,
							     &perf_stat.session->machines.host);
		if (err) {
			pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
				   "older tools may produce warnings about this file.\n");
		}

		if (!interval) {
			if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
				pr_err("failed to write stat round event\n");
		}

		if (!perf_stat.data.is_pipe) {
			perf_stat.session->header.data_size += perf_stat.bytes_written;
			perf_session__write_header(perf_stat.session, evsel_list, fd, true);
		}

		perf_session__delete(perf_stat.session);
	}

	perf_stat__exit_aggr_mode();
	perf_evlist__free_stats(evsel_list);
out:
	free(stat_config.walltime_run);

	if (smi_cost && smi_reset)
		sysfs__write_int(FREEZE_ON_SMI_PATH, 0);

	perf_evlist__delete(evsel_list);

	runtime_stat_delete(&stat_config);

	return status;
}