#ifndef __PERF_EVLIST_H
#define __PERF_EVLIST_H 1

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/list.h>
#include <api/fd/array.h>
#include <stdio.h>
#include "../perf.h"
#include "event.h"
#include "evsel.h"
#include "util.h"
#include "auxtrace.h"
#include <signal.h>
#include <unistd.h>

struct pollfd;
struct thread_map;
struct cpu_map;
struct record_opts;

#define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)

/**
 * struct perf_mmap - perf's ring buffer mmap details
 *
 * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
 */
struct perf_mmap {
        void             *base;
        int              mask;
        int              fd;
        refcount_t       refcnt;
        u64              prev;
        struct auxtrace_mmap auxtrace_mmap;
        char             event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
};

/*
 * The mapping covers the 2^n-byte data area (mask + 1) plus one extra
 * page for the control/header area read by perf_mmap__read_head().
 */
static inline size_t
perf_mmap__mmap_len(struct perf_mmap *map)
{
        return map->mask + 1 + page_size;
}

/*
 * State machine of bkw_mmap_state:
 *
 *                     .________________(forbid)_____________.
 *                     |                                      V
 * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
 *                     ^  ^              |   ^               |
 *                     |  |__(forbid)____/   |___(forbid)___/|
 *                     |                                     |
 *                      \_________________(3)_______________/
 *
 * NOTREADY     : Backward ring buffers are not ready
 * RUNNING      : Backward ring buffers are recording
 * DATA_PENDING : We are required to collect data from backward ring buffers
 * EMPTY        : We have collected data from backward ring buffers.
 *
 * (0): Setup backward ring buffer
 * (1): Pause ring buffers for reading
 * (2): Read from ring buffers
 * (3): Resume ring buffers for recording
 */
enum bkw_mmap_state {
        BKW_MMAP_NOTREADY,
        BKW_MMAP_RUNNING,
        BKW_MMAP_DATA_PENDING,
        BKW_MMAP_EMPTY,
};

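/*
 * Illustrative sketch (not part of the original header): one way a
 * reader can drive the state machine above, using helpers declared
 * later in this file.  Error handling and the event consumer
 * (process_event) are hypothetical.
 *
 *	static void drain_backward(struct perf_evlist *evlist)
 *	{
 *		union perf_event *event;
 *		int i;
 *
 *		// (1): pause the backward ring buffers for reading
 *		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
 *
 *		// (2): read what each map captured
 *		for (i = 0; i < evlist->nr_mmaps; i++) {
 *			perf_evlist__mmap_read_catchup(evlist, i);
 *			while ((event = perf_evlist__mmap_read_backward(evlist, i)))
 *				process_event(event);
 *		}
 *		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
 *
 *		// (3): resume recording
 *		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
 *	}
 */
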
struct perf_evlist {
        struct list_head entries;
        struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
        int              nr_entries;
        int              nr_groups;
        int              nr_mmaps;
        bool             overwrite;
        bool             enabled;
        bool             has_user_cpus;
        size_t           mmap_len;
        int              id_pos;
        int              is_pos;
        u64              combined_sample_type;
        enum bkw_mmap_state bkw_mmap_state;
        struct {
                int      cork_fd;
                pid_t    pid;
        } workload;
        struct fdarray   pollfd;
        struct perf_mmap *mmap;
        struct perf_mmap *backward_mmap;
        struct thread_map *threads;
        struct cpu_map   *cpus;
        struct perf_evsel *selected;
        struct events_stats stats;
        struct perf_env  *env;
};

struct perf_evsel_str_handler {
        const char *name;
        void       *handler;
};

struct perf_evlist *perf_evlist__new(void);
struct perf_evlist *perf_evlist__new_default(void);
struct perf_evlist *perf_evlist__new_dummy(void);
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
                       struct thread_map *threads);
void perf_evlist__exit(struct perf_evlist *evlist);
void perf_evlist__delete(struct perf_evlist *evlist);

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel);
int perf_evlist__add_default(struct perf_evlist *evlist);
int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
                                     struct perf_event_attr *attrs, size_t nr_attrs);

#define perf_evlist__add_default_attrs(evlist, array) \
        __perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))

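/*
 * Example (sketch): the macro above adds a caller-supplied attr array
 * in one call; the attrs shown are illustrative, not mandated here.
 *
 *	struct perf_event_attr attrs[] = {
 *		{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
 *		{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
 *	};
 *
 *	if (perf_evlist__add_default_attrs(evlist, attrs) < 0)
 *		return -1;
 */
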
int perf_evlist__add_dummy(struct perf_evlist *evlist);

int perf_evlist__add_newtp(struct perf_evlist *evlist,
                           const char *sys, const char *name, void *handler);

void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
                                   enum perf_event_sample_format bit);
void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
                                     enum perf_event_sample_format bit);

#define perf_evlist__set_sample_bit(evlist, bit) \
        __perf_evlist__set_sample_bit(evlist, PERF_SAMPLE_##bit)

#define perf_evlist__reset_sample_bit(evlist, bit) \
        __perf_evlist__reset_sample_bit(evlist, PERF_SAMPLE_##bit)

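/*
 * Example (sketch): the token pasting above turns the short name into
 * the PERF_SAMPLE_* flag, so this requests timestamps on every evsel
 * in the list:
 *
 *	perf_evlist__set_sample_bit(evlist, TIME);
 */
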
int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter);
int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid);
int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids);

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
                                     const char *name);

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
                         int cpu, int thread, u64 id);
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
                           struct perf_evsel *evsel,
                           int cpu, int thread, int fd);

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask);

int perf_evlist__poll(struct perf_evlist *evlist, int timeout);

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
                                                u64 id);

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);

void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state);

union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup);
union perf_event *perf_mmap__read_backward(struct perf_mmap *map);

void perf_mmap__read_catchup(struct perf_mmap *md);
void perf_mmap__consume(struct perf_mmap *md, bool overwrite);

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);

union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
                                                 int idx);
union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist,
                                                  int idx);
void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx);

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);

int perf_evlist__open(struct perf_evlist *evlist);
void perf_evlist__close(struct perf_evlist *evlist);

struct callchain_param;

void perf_evlist__set_id_pos(struct perf_evlist *evlist);
bool perf_can_sample_identifier(void);
bool perf_can_record_switch_events(void);
bool perf_can_record_cpu_wide(void);
void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
                         struct callchain_param *callchain);
int record_opts__config(struct record_opts *opts);

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
                                  struct target *target,
                                  const char *argv[], bool pipe_output,
                                  void (*exec_error)(int signo, siginfo_t *info,
                                                     void *ucontext));
int perf_evlist__start_workload(struct perf_evlist *evlist);

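/*
 * Sketch of the fork/cork pattern these two calls implement: prepare
 * forks the workload and blocks it on a pipe (the cork_fd in struct
 * perf_evlist's workload member) until the counters are set up, then
 * start releases it.  The target/argv values here are illustrative.
 *
 *	if (perf_evlist__prepare_workload(evlist, &target, argv,
 *					  false, NULL) < 0)
 *		return -1;
 *
 *	// ... open, mmap and enable the events ...
 *
 *	perf_evlist__start_workload(evlist);	// uncorks the child's exec
 */
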
struct option;

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str);
int perf_evlist__parse_mmap_pages(const struct option *opt,
                                  const char *str,
                                  int unset);

unsigned long perf_event_mlock_kb_in_pages(void);

int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
                         bool overwrite, unsigned int auxtrace_pages,
                         bool auxtrace_overwrite);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
                      bool overwrite);
void perf_evlist__munmap(struct perf_evlist *evlist);

size_t perf_evlist__mmap_size(unsigned long pages);

void perf_evlist__disable(struct perf_evlist *evlist);
void perf_evlist__enable(struct perf_evlist *evlist);
void perf_evlist__toggle_enable(struct perf_evlist *evlist);

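/*
 * Illustrative sketch (not part of the original header): the usual
 * life cycle of an evlist built from the declarations above.  Error
 * paths are elided; passing UINT_MAX as pages picks the default mmap
 * size in perf_evlist__mmap_size().
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *	union perf_event *event;
 *	int i;
 *
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, UINT_MAX, false);
 *	perf_evlist__enable(evlist);
 *
 *	while (perf_evlist__poll(evlist, 100) > 0) {
 *		for (i = 0; i < evlist->nr_mmaps; i++) {
 *			while ((event = perf_evlist__mmap_read(evlist, i))) {
 *				// ... consume the event ...
 *				perf_evlist__mmap_consume(evlist, i);
 *			}
 *		}
 *	}
 *
 *	perf_evlist__disable(evlist);
 *	perf_evlist__munmap(evlist);
 *	perf_evlist__close(evlist);
 *	perf_evlist__delete(evlist);
 */
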
int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
                                  struct perf_evsel *evsel, int idx);

void perf_evlist__set_selected(struct perf_evlist *evlist,
                               struct perf_evsel *evsel);

void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
                           struct thread_map *threads);
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel);

void __perf_evlist__set_leader(struct list_head *list);
void perf_evlist__set_leader(struct perf_evlist *evlist);

u64 perf_evlist__read_format(struct perf_evlist *evlist);
u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist);
u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist);
u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist);
bool perf_evlist__sample_id_all(struct perf_evlist *evlist);
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
                              struct perf_sample *sample);

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
bool perf_evlist__valid_read_format(struct perf_evlist *evlist);

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
                                   struct list_head *list);

static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
{
        return list_entry(evlist->entries.next, struct perf_evsel, node);
}

static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
{
        return list_entry(evlist->entries.prev, struct perf_evsel, node);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);

int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);

static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
        struct perf_event_mmap_page *pc = mm->base;
        u64 head = ACCESS_ONCE(pc->data_head);

        /*
         * Pairs with the kernel's write barrier: reads of the data
         * area below must not be reordered before this head load.
         */
        rmb();
        return head;
}

static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
        struct perf_event_mmap_page *pc = md->base;

        /*
         * Ensure all reads are done before we write the tail out.
         */
        mb();
        pc->data_tail = tail;
}

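/*
 * Sketch (illustrative, not in the original header) of how the two
 * helpers above pair in a consumer loop.  'process' is hypothetical,
 * and wrap-around of records straddling the buffer end (handled via
 * event_copy in the real readers) is omitted.
 *
 *	u8 *data = (u8 *)md->base + page_size;
 *	u64 head = perf_mmap__read_head(md);
 *	u64 old  = md->prev;
 *
 *	while (old != head) {
 *		union perf_event *event =
 *			(union perf_event *)&data[old & md->mask];
 *		process(event);
 *		old += event->header.size;
 *	}
 *	md->prev = old;
 *	perf_mmap__write_tail(md, old);	// kernel may now reuse the space
 */
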
bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str);
void perf_evlist__to_front(struct perf_evlist *evlist,
                           struct perf_evsel *move_evsel);

/**
 * __evlist__for_each_entry - iterate through all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define __evlist__for_each_entry(list, evsel) \
        list_for_each_entry(evsel, list, node)

/**
 * evlist__for_each_entry - iterate through all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define evlist__for_each_entry(evlist, evsel) \
        __evlist__for_each_entry(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_continue - continue iteration through all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define __evlist__for_each_entry_continue(list, evsel) \
        list_for_each_entry_continue(evsel, list, node)

/**
 * evlist__for_each_entry_continue - continue iteration through all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define evlist__for_each_entry_continue(evlist, evsel) \
        __evlist__for_each_entry_continue(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_reverse - iterate through all the evsels in reverse order
 * @list: list_head instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define __evlist__for_each_entry_reverse(list, evsel) \
        list_for_each_entry_reverse(evsel, list, node)

/**
 * evlist__for_each_entry_reverse - iterate through all the evsels in reverse order
 * @evlist: evlist instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define evlist__for_each_entry_reverse(evlist, evsel) \
        __evlist__for_each_entry_reverse(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_safe - safely iterate through all the evsels
 * @list: list_head instance to iterate
 * @tmp: struct perf_evsel temp iterator
 * @evsel: struct perf_evsel iterator
 */
#define __evlist__for_each_entry_safe(list, tmp, evsel) \
        list_for_each_entry_safe(evsel, tmp, list, node)

/**
 * evlist__for_each_entry_safe - safely iterate through all the evsels
 * @evlist: evlist instance to iterate
 * @tmp: struct perf_evsel temp iterator
 * @evsel: struct perf_evsel iterator
 */
#define evlist__for_each_entry_safe(evlist, tmp, evsel) \
        __evlist__for_each_entry_safe(&(evlist)->entries, tmp, evsel)

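/*
 * Example (sketch): walking the list with the iterators above;
 * perf_evsel__name() is declared in evsel.h.
 *
 *	struct perf_evsel *evsel;
 *
 *	evlist__for_each_entry(evlist, evsel)
 *		fprintf(stdout, "%s\n", perf_evsel__name(evsel));
 */
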
void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
                                     struct perf_evsel *tracking_evsel);

void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);

struct perf_evsel *
perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);

struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
                                            union perf_event *event);
#endif /* __PERF_EVLIST_H */