4 #include "parse-events.h"
/*
 * Callback used by the probe helpers below: flips the single attribute
 * bit under test on a trial evsel before it is re-opened.
 */
typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);
12 static int perf_do_probe_api(setup_probe_fn_t fn
, int cpu
, const char *str
)
14 struct perf_evlist
*evlist
;
15 struct perf_evsel
*evsel
;
16 unsigned long flags
= perf_event_open_cloexec_flag();
17 int err
= -EAGAIN
, fd
;
18 static pid_t pid
= -1;
20 evlist
= perf_evlist__new();
24 if (parse_events(evlist
, str
, NULL
))
27 evsel
= perf_evlist__first(evlist
);
30 fd
= sys_perf_event_open(&evsel
->attr
, pid
, cpu
, -1, flags
);
32 if (pid
== -1 && errno
== EACCES
) {
44 fd
= sys_perf_event_open(&evsel
->attr
, pid
, cpu
, -1, flags
);
54 perf_evlist__delete(evlist
);
58 static bool perf_probe_api(setup_probe_fn_t fn
)
60 const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL
};
64 cpus
= cpu_map__new(NULL
);
71 ret
= perf_do_probe_api(fn
, cpu
, try[i
++]);
74 } while (ret
== -EAGAIN
&& try[i
]);
79 static void perf_probe_sample_identifier(struct perf_evsel
*evsel
)
81 evsel
->attr
.sample_type
|= PERF_SAMPLE_IDENTIFIER
;
84 static void perf_probe_comm_exec(struct perf_evsel
*evsel
)
86 evsel
->attr
.comm_exec
= 1;
89 static void perf_probe_context_switch(struct perf_evsel
*evsel
)
91 evsel
->attr
.context_switch
= 1;
94 bool perf_can_sample_identifier(void)
96 return perf_probe_api(perf_probe_sample_identifier
);
99 static bool perf_can_comm_exec(void)
101 return perf_probe_api(perf_probe_comm_exec
);
104 bool perf_can_record_switch_events(void)
106 return perf_probe_api(perf_probe_context_switch
);
109 bool perf_can_record_cpu_wide(void)
111 struct perf_event_attr attr
= {
112 .type
= PERF_TYPE_SOFTWARE
,
113 .config
= PERF_COUNT_SW_CPU_CLOCK
,
116 struct cpu_map
*cpus
;
119 cpus
= cpu_map__new(NULL
);
125 fd
= sys_perf_event_open(&attr
, -1, cpu
, -1, 0);
133 void perf_evlist__config(struct perf_evlist
*evlist
, struct record_opts
*opts
,
134 struct callchain_param
*callchain
)
136 struct perf_evsel
*evsel
;
137 bool use_sample_identifier
= false;
141 * Set the evsel leader links before we configure attributes,
142 * since some might depend on this info.
145 perf_evlist__set_leader(evlist
);
147 if (evlist
->cpus
->map
[0] < 0)
148 opts
->no_inherit
= true;
150 use_comm_exec
= perf_can_comm_exec();
152 evlist__for_each_entry(evlist
, evsel
) {
153 perf_evsel__config(evsel
, opts
, callchain
);
154 if (evsel
->tracking
&& use_comm_exec
)
155 evsel
->attr
.comm_exec
= 1;
158 if (opts
->full_auxtrace
) {
160 * Need to be able to synthesize and parse selected events with
161 * arbitrary sample types, which requires always being able to
164 use_sample_identifier
= perf_can_sample_identifier();
165 evlist__for_each_entry(evlist
, evsel
)
166 perf_evsel__set_sample_id(evsel
, use_sample_identifier
);
167 } else if (evlist
->nr_entries
> 1) {
168 struct perf_evsel
*first
= perf_evlist__first(evlist
);
170 evlist__for_each_entry(evlist
, evsel
) {
171 if (evsel
->attr
.sample_type
== first
->attr
.sample_type
)
173 use_sample_identifier
= perf_can_sample_identifier();
176 evlist__for_each_entry(evlist
, evsel
)
177 perf_evsel__set_sample_id(evsel
, use_sample_identifier
);
180 perf_evlist__set_id_pos(evlist
);
/*
 * Read the kernel's maximum allowed sample rate into *rate.
 * Returns 0 on success, negative on sysctl read failure.
 */
static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}
188 static int record_opts__config_freq(struct record_opts
*opts
)
190 bool user_freq
= opts
->user_freq
!= UINT_MAX
;
191 unsigned int max_rate
;
193 if (opts
->user_interval
!= ULLONG_MAX
)
194 opts
->default_interval
= opts
->user_interval
;
196 opts
->freq
= opts
->user_freq
;
199 * User specified count overrides default frequency.
201 if (opts
->default_interval
)
203 else if (opts
->freq
) {
204 opts
->default_interval
= opts
->freq
;
206 pr_err("frequency and count are zero, aborting\n");
210 if (get_max_rate(&max_rate
))
214 * User specified frequency is over current maximum.
216 if (user_freq
&& (max_rate
< opts
->freq
)) {
217 pr_err("Maximum frequency rate (%u) reached.\n"
218 "Please use -F freq option with lower value or consider\n"
219 "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
225 * Default frequency is over current maximum.
227 if (max_rate
< opts
->freq
) {
228 pr_warning("Lowering default frequency rate to %u.\n"
229 "Please consider tweaking "
230 "/proc/sys/kernel/perf_event_max_sample_rate.\n",
232 opts
->freq
= max_rate
;
/* Public entry point: currently only frequency/interval configuration. */
int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}
243 bool perf_evlist__can_select_event(struct perf_evlist
*evlist
, const char *str
)
245 struct perf_evlist
*temp_evlist
;
246 struct perf_evsel
*evsel
;
251 temp_evlist
= perf_evlist__new();
255 err
= parse_events(temp_evlist
, str
, NULL
);
259 evsel
= perf_evlist__last(temp_evlist
);
261 if (!evlist
|| cpu_map__empty(evlist
->cpus
)) {
262 struct cpu_map
*cpus
= cpu_map__new(NULL
);
264 cpu
= cpus
? cpus
->map
[0] : 0;
267 cpu
= evlist
->cpus
->map
[0];
271 fd
= sys_perf_event_open(&evsel
->attr
, pid
, cpu
, -1,
272 perf_event_open_cloexec_flag());
274 if (pid
== -1 && errno
== EACCES
) {
286 perf_evlist__delete(temp_evlist
);