/* tools/perf/util/evlist.h — event list declarations (blame-export residue removed) */
1 | #ifndef __PERF_EVLIST_H |
2 | #define __PERF_EVLIST_H 1 | |
3 | ||
7143849a | 4 | #include <linux/atomic.h> |
361c99a6 | 5 | #include <linux/list.h> |
1b85337d | 6 | #include <api/fd/array.h> |
50d08e47 | 7 | #include <stdio.h> |
70db7533 | 8 | #include "../perf.h" |
04391deb | 9 | #include "event.h" |
0c21f736 | 10 | #include "evsel.h" |
50d08e47 | 11 | #include "util.h" |
718c602d | 12 | #include "auxtrace.h" |
35b9d88e | 13 | #include <unistd.h> |
361c99a6 | 14 | |
5c581041 | 15 | struct pollfd; |
f8a95309 ACM |
16 | struct thread_map; |
17 | struct cpu_map; | |
b4006796 | 18 | struct record_opts; |
5c581041 | 19 | |
/* Bucket count for each evlist's heads[] hash table (see struct perf_evlist). */
#define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)

/**
 * struct perf_mmap - perf's ring buffer mmap details
 *
 * @base: start of the mmap'ed region; begins with a struct
 *        perf_event_mmap_page (see perf_mmap__read_head/write_tail)
 * @mask: data-area size minus one; perf_mmap__mmap_len() shows the data
 *        area is @mask + 1 bytes
 * @fd: file descriptor backing this mmap
 * @refcnt: e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
 * @prev: last head/tail position seen — presumably the read cursor;
 *        TODO confirm against evlist.c users
 * @auxtrace_mmap: AUX area tracing mmap associated with this buffer
 * @event_copy: 8-byte aligned scratch buffer sized for the largest
 *              sample — presumably used to linearize events that wrap
 *              around the ring end; confirm in the read path
 */
struct perf_mmap {
	void		 *base;
	int		 mask;
	int		 fd;
	atomic_t	 refcnt;
	u64		 prev;
	struct auxtrace_mmap auxtrace_mmap;
	char		 event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
};
37 | ||
8db6d6b1 WN |
38 | static inline size_t |
39 | perf_mmap__mmap_len(struct perf_mmap *map) | |
40 | { | |
41 | return map->mask + 1 + page_size; | |
42 | } | |
43 | ||
/*
 * State machine of bkw_mmap_state:
 *
 *  .________________(forbid)_____________.
 *  |                                     V
 *  NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
 *                    ^  ^              |   ^               |
 *                    |  |__(forbid)____/   |___(forbid)___/|
 *                    |                                     |
 *                     \_________________(3)_______________/
 *
 * NOTREADY     : Backward ring buffers are not ready
 * RUNNING      : Backward ring buffers are recording
 * DATA_PENDING : We are required to collect data from backward ring buffers
 * EMPTY        : We have collected data from backward ring buffers.
 *
 * (0): Setup backward ring buffer
 * (1): Pause ring buffers for reading
 * (2): Read from ring buffers
 * (3): Resume ring buffers for recording
 *
 * Transitions are driven by perf_evlist__toggle_bkw_mmap(); the current
 * state lives in perf_evlist::bkw_mmap_state.
 */
enum bkw_mmap_state {
	BKW_MMAP_NOTREADY,
	BKW_MMAP_RUNNING,
	BKW_MMAP_DATA_PENDING,
	BKW_MMAP_EMPTY,
};
71 | ||
/*
 * A list of events (evsels) plus the resources needed to poll, mmap and
 * read them.  Iterate with evlist__for_each_entry() and friends below.
 */
struct perf_evlist {
	struct list_head entries;	/* perf_evsels, linked via evsel->node */
	struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; /* presumably sample-id -> evsel lookup (perf_evlist__id2evsel) — confirm in evlist.c */
	int		 nr_entries;	/* number of evsels on @entries */
	int		 nr_groups;
	int		 nr_mmaps;	/* entries in @mmap (and, presumably, @backward_mmap) */
	bool		 overwrite;
	bool		 enabled;
	bool		 has_user_cpus;
	size_t		 mmap_len;
	int		 id_pos;	/* set by perf_evlist__set_id_pos() */
	int		 is_pos;
	u64		 combined_sample_type;
	enum bkw_mmap_state bkw_mmap_state; /* see state machine above */
	struct {
		int	cork_fd;	/* NOTE(review): presumably written to release the forked workload — confirm in prepare/start_workload */
		pid_t	pid;
	} workload;
	struct fdarray	 pollfd;	/* fds for perf_evlist__poll() */
	struct perf_mmap *mmap;		/* forward ring buffers */
	struct perf_mmap *backward_mmap; /* backward ring buffers */
	struct thread_map *threads;
	struct cpu_map	 *cpus;
	struct perf_evsel *selected;	/* see perf_evlist__set_selected() */
	struct events_stats stats;
	struct perf_env	 *env;
};
99 | ||
/*
 * Pairs an event name with an opaque handler pointer — presumably used to
 * attach handlers to evsels by name (cf. perf_evlist__add_newtp()'s
 * handler argument); the handler's real signature is defined by the caller.
 */
struct perf_evsel_str_handler {
	const char *name;
	void	   *handler;
};
104 | ||
334fe7a3 | 105 | struct perf_evlist *perf_evlist__new(void); |
b22d54b0 | 106 | struct perf_evlist *perf_evlist__new_default(void); |
5bae0250 | 107 | struct perf_evlist *perf_evlist__new_dummy(void); |
7e2ed097 ACM |
108 | void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, |
109 | struct thread_map *threads); | |
ef1d1af2 | 110 | void perf_evlist__exit(struct perf_evlist *evlist); |
361c99a6 ACM |
111 | void perf_evlist__delete(struct perf_evlist *evlist); |
112 | ||
113 | void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry); | |
4768230a | 114 | void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel); |
361c99a6 | 115 | int perf_evlist__add_default(struct perf_evlist *evlist); |
79695e1b ACM |
116 | int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, |
117 | struct perf_event_attr *attrs, size_t nr_attrs); | |
e60fc847 | 118 | |
79695e1b ACM |
119 | #define perf_evlist__add_default_attrs(evlist, array) \ |
120 | __perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array)) | |
361c99a6 | 121 | |
5bae0250 ACM |
122 | int perf_evlist__add_dummy(struct perf_evlist *evlist); |
123 | ||
39876e7d ACM |
124 | int perf_evlist__add_newtp(struct perf_evlist *evlist, |
125 | const char *sys, const char *name, void *handler); | |
126 | ||
/* Set/clear a perf_event_sample_format bit across the evlist (defined in evlist.c). */
void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
				   enum perf_event_sample_format bit);
void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
				     enum perf_event_sample_format bit);

/*
 * Token-pasting convenience wrappers: callers pass the bare bit name,
 * e.g. perf_evlist__set_sample_bit(evlist, TIME) -> PERF_SAMPLE_TIME.
 */
#define perf_evlist__set_sample_bit(evlist, bit) \
	__perf_evlist__set_sample_bit(evlist, PERF_SAMPLE_##bit)

#define perf_evlist__reset_sample_bit(evlist, bit) \
	__perf_evlist__reset_sample_bit(evlist, PERF_SAMPLE_##bit)

745cefc5 | 138 | int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter); |
cfd70a26 | 139 | int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid); |
be199ada | 140 | int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids); |
745cefc5 | 141 | |
da378962 ACM |
142 | struct perf_evsel * |
143 | perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id); | |
144 | ||
a2f2804a DA |
145 | struct perf_evsel * |
146 | perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist, | |
147 | const char *name); | |
148 | ||
a91e5431 ACM |
149 | void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, |
150 | int cpu, int thread, u64 id); | |
1c59612d JO |
151 | int perf_evlist__id_add_fd(struct perf_evlist *evlist, |
152 | struct perf_evsel *evsel, | |
153 | int cpu, int thread, int fd); | |
3d3b5e95 | 154 | |
ad6765dd ACM |
155 | int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd); |
156 | int perf_evlist__alloc_pollfd(struct perf_evlist *evlist); | |
1ddec7f0 ACM |
157 | int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask); |
158 | ||
f66a889d ACM |
159 | int perf_evlist__poll(struct perf_evlist *evlist, int timeout); |
160 | ||
70db7533 | 161 | struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); |
dddcf6ab AH |
162 | struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist, |
163 | u64 id); | |
70db7533 | 164 | |
932a3594 JO |
165 | struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id); |
166 | ||
54cc54de WN |
167 | void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state); |
168 | ||
8db6d6b1 WN |
169 | union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup); |
170 | union perf_event *perf_mmap__read_backward(struct perf_mmap *map); | |
171 | ||
172 | void perf_mmap__read_catchup(struct perf_mmap *md); | |
173 | void perf_mmap__consume(struct perf_mmap *md, bool overwrite); | |
174 | ||
316c7136 | 175 | union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx); |
04391deb | 176 | |
5a5ddeb6 WN |
177 | union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, |
178 | int idx); | |
e24c7520 WN |
179 | union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, |
180 | int idx); | |
181 | void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx); | |
182 | ||
8e50d384 ZZ |
183 | void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx); |
184 | ||
6a4bb04c | 185 | int perf_evlist__open(struct perf_evlist *evlist); |
a74b4b66 | 186 | void perf_evlist__close(struct perf_evlist *evlist); |
727ab04e | 187 | |
e68ae9cf ACM |
188 | struct callchain_param; |
189 | ||
75562573 AH |
190 | void perf_evlist__set_id_pos(struct perf_evlist *evlist); |
191 | bool perf_can_sample_identifier(void); | |
b757bb09 | 192 | bool perf_can_record_switch_events(void); |
83509565 | 193 | bool perf_can_record_cpu_wide(void); |
e68ae9cf ACM |
194 | void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts, |
195 | struct callchain_param *callchain); | |
b4006796 | 196 | int record_opts__config(struct record_opts *opts); |
0f82ebc4 | 197 | |
35b9d88e | 198 | int perf_evlist__prepare_workload(struct perf_evlist *evlist, |
602ad878 | 199 | struct target *target, |
55e162ea | 200 | const char *argv[], bool pipe_output, |
735f7e0b ACM |
201 | void (*exec_error)(int signo, siginfo_t *info, |
202 | void *ucontext)); | |
35b9d88e ACM |
203 | int perf_evlist__start_workload(struct perf_evlist *evlist); |
204 | ||
724ce97e ACM |
205 | struct option; |
206 | ||
e9db1310 | 207 | int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str); |
994a1f78 JO |
208 | int perf_evlist__parse_mmap_pages(const struct option *opt, |
209 | const char *str, | |
210 | int unset); | |
211 | ||
f5e7150c ACM |
212 | unsigned long perf_event_mlock_kb_in_pages(void); |
213 | ||
718c602d AH |
214 | int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, |
215 | bool overwrite, unsigned int auxtrace_pages, | |
216 | bool auxtrace_overwrite); | |
50a682ce ACM |
217 | int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, |
218 | bool overwrite); | |
7e2ed097 ACM |
219 | void perf_evlist__munmap(struct perf_evlist *evlist); |
220 | ||
4152ab37 | 221 | void perf_evlist__disable(struct perf_evlist *evlist); |
764e16a3 | 222 | void perf_evlist__enable(struct perf_evlist *evlist); |
2b56bcfb | 223 | void perf_evlist__toggle_enable(struct perf_evlist *evlist); |
4152ab37 | 224 | |
1c65056c AH |
225 | int perf_evlist__enable_event_idx(struct perf_evlist *evlist, |
226 | struct perf_evsel *evsel, int idx); | |
395c3070 | 227 | |
81cce8de ACM |
228 | void perf_evlist__set_selected(struct perf_evlist *evlist, |
229 | struct perf_evsel *evsel); | |
230 | ||
d5bc056e AH |
231 | void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus, |
232 | struct thread_map *threads); | |
602ad878 | 233 | int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target); |
23d4aad4 | 234 | int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel); |
f8a95309 | 235 | |
63dab225 ACM |
236 | void __perf_evlist__set_leader(struct list_head *list); |
237 | void perf_evlist__set_leader(struct perf_evlist *evlist); | |
238 | ||
9ede473c | 239 | u64 perf_evlist__read_format(struct perf_evlist *evlist); |
75562573 AH |
240 | u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist); |
241 | u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist); | |
98df858e | 242 | u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist); |
0c21f736 ACM |
243 | bool perf_evlist__sample_id_all(struct perf_evlist *evlist); |
244 | u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist); | |
74429964 | 245 | |
a3f698fe | 246 | int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, |
0807d2d8 | 247 | struct perf_sample *sample); |
cb0b29e0 | 248 | |
0c21f736 ACM |
249 | bool perf_evlist__valid_sample_type(struct perf_evlist *evlist); |
250 | bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist); | |
9ede473c | 251 | bool perf_evlist__valid_read_format(struct perf_evlist *evlist); |
0529bc1f JO |
252 | |
253 | void perf_evlist__splice_list_tail(struct perf_evlist *evlist, | |
f114d6ef | 254 | struct list_head *list); |
0c21f736 ACM |
255 | |
256 | static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist) | |
257 | { | |
258 | return list_entry(evlist->entries.next, struct perf_evsel, node); | |
259 | } | |
260 | ||
261 | static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist) | |
262 | { | |
263 | return list_entry(evlist->entries.prev, struct perf_evsel, node); | |
264 | } | |
78f067b3 ACM |
265 | |
266 | size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp); | |
0479b8b9 | 267 | |
a8f23d8f | 268 | int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size); |
956fa571 | 269 | int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size); |
6ef068cb | 270 | |
/*
 * Read the kernel-updated producer position (data_head) from the ring
 * buffer's control page.  ACCESS_ONCE forces a single, non-torn load;
 * the rmb() orders that load before any subsequent reads of ring data,
 * so we never read data the head value does not yet cover.
 */
static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->base;
	u64 head = ACCESS_ONCE(pc->data_head);
	rmb();
	return head;
}
278 | ||
/*
 * Publish the consumer position (data_tail) to the control page, telling
 * the kernel how far we have consumed so it may reuse that space.
 */
static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
	struct perf_event_mmap_page *pc = md->base;

	/*
	 * ensure all reads are done before we write the tail out.
	 */
	mb();
	pc->data_tail = tail;
}
289 | ||
c09ec622 | 290 | bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str); |
a025e4f0 AH |
291 | void perf_evlist__to_front(struct perf_evlist *evlist, |
292 | struct perf_evsel *move_evsel); | |
293 | ||
/**
 * __evlist__for_each_entry - iterate through all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry(list, evsel) \
	list_for_each_entry(evsel, list, node)

/**
 * evlist__for_each_entry - iterate through all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry(evlist, evsel) \
	__evlist__for_each_entry(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_continue - continue iteration through all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator, positioned at the entry to resume after
 */
#define __evlist__for_each_entry_continue(list, evsel) \
	list_for_each_entry_continue(evsel, list, node)

/**
 * evlist__for_each_entry_continue - continue iteration through all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator, positioned at the entry to resume after
 */
#define evlist__for_each_entry_continue(evlist, evsel) \
	__evlist__for_each_entry_continue(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_reverse - iterate through all the evsels in reverse order
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_reverse(list, evsel) \
	list_for_each_entry_reverse(evsel, list, node)

/**
 * evlist__for_each_entry_reverse - iterate through all the evsels in reverse order
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry_reverse(evlist, evsel) \
	__evlist__for_each_entry_reverse(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_safe - safely iterate through all the evsels
 * @list: list_head instance to iterate
 * @tmp: struct evsel temp iterator (allows removing @evsel during iteration)
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_safe(list, tmp, evsel) \
	list_for_each_entry_safe(evsel, tmp, list, node)

/**
 * evlist__for_each_entry_safe - safely iterate through all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 * @tmp: struct evsel temp iterator (allows removing @evsel during iteration)
 */
#define evlist__for_each_entry_safe(evlist, tmp, evsel) \
	__evlist__for_each_entry_safe(&(evlist)->entries, tmp, evsel)
c09ec622 | 359 | |
60b0896c AH |
360 | void perf_evlist__set_tracking_event(struct perf_evlist *evlist, |
361 | struct perf_evsel *tracking_evsel); | |
45cf6c33 JO |
362 | |
363 | void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr); | |
7630b3e2 WN |
364 | |
365 | struct perf_evsel * | |
366 | perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str); | |
7cb5c5ac JO |
367 | |
368 | struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist, | |
369 | union perf_event *event); | |
361c99a6 | 370 | #endif /* __PERF_EVLIST_H */ |