Commit | Line | Data |
---|---|---|
f8a95309 ACM |
1 | /* |
2 | * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> | |
3 | * | |
4 | * Parts came from builtin-{top,stat,record}.c, see those files for further | |
5 | * copyright notes. | |
6 | * | |
7 | * Released under the GPL v2. (and only v2, not any later version) | |
8 | */ | |
a8c9ae18 | 9 | #include "util.h" |
956fa571 | 10 | #include <api/fs/fs.h> |
5c581041 | 11 | #include <poll.h> |
f8a95309 ACM |
12 | #include "cpumap.h" |
13 | #include "thread_map.h" | |
12864b31 | 14 | #include "target.h" |
361c99a6 ACM |
15 | #include "evlist.h" |
16 | #include "evsel.h" | |
e3e1a54f | 17 | #include "debug.h" |
54cc54de | 18 | #include "asm/bug.h" |
35b9d88e | 19 | #include <unistd.h> |
361c99a6 | 20 | |
50d08e47 | 21 | #include "parse-events.h" |
4b6ab94e | 22 | #include <subcmd/parse-options.h> |
50d08e47 | 23 | |
f8a95309 ACM |
24 | #include <sys/mman.h> |
25 | ||
70db7533 ACM |
26 | #include <linux/bitops.h> |
27 | #include <linux/hash.h> | |
0389cd1f | 28 | #include <linux/log2.h> |
8dd2a131 | 29 | #include <linux/err.h> |
70db7533 | 30 | |
8db6d6b1 | 31 | static void perf_mmap__munmap(struct perf_mmap *map); |
4876075b | 32 | static void perf_mmap__put(struct perf_mmap *map); |
e4b356b5 | 33 | |
f8a95309 | 34 | #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) |
a91e5431 | 35 | #define SID(e, x, y) xyarray__entry(e->sample_id, x, y) |
f8a95309 | 36 | |
7e2ed097 ACM |
37 | void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, |
38 | struct thread_map *threads) | |
ef1d1af2 ACM |
39 | { |
40 | int i; | |
41 | ||
42 | for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i) | |
43 | INIT_HLIST_HEAD(&evlist->heads[i]); | |
44 | INIT_LIST_HEAD(&evlist->entries); | |
7e2ed097 | 45 | perf_evlist__set_maps(evlist, cpus, threads); |
1b85337d | 46 | fdarray__init(&evlist->pollfd, 64); |
35b9d88e | 47 | evlist->workload.pid = -1; |
54cc54de | 48 | evlist->bkw_mmap_state = BKW_MMAP_NOTREADY; |
ef1d1af2 ACM |
49 | } |
50 | ||
334fe7a3 | 51 | struct perf_evlist *perf_evlist__new(void) |
361c99a6 ACM |
52 | { |
53 | struct perf_evlist *evlist = zalloc(sizeof(*evlist)); | |
54 | ||
ef1d1af2 | 55 | if (evlist != NULL) |
334fe7a3 | 56 | perf_evlist__init(evlist, NULL, NULL); |
361c99a6 ACM |
57 | |
58 | return evlist; | |
59 | } | |
60 | ||
b22d54b0 JO |
61 | struct perf_evlist *perf_evlist__new_default(void) |
62 | { | |
63 | struct perf_evlist *evlist = perf_evlist__new(); | |
64 | ||
65 | if (evlist && perf_evlist__add_default(evlist)) { | |
66 | perf_evlist__delete(evlist); | |
67 | evlist = NULL; | |
68 | } | |
69 | ||
70 | return evlist; | |
71 | } | |
72 | ||
5bae0250 ACM |
73 | struct perf_evlist *perf_evlist__new_dummy(void) |
74 | { | |
75 | struct perf_evlist *evlist = perf_evlist__new(); | |
76 | ||
77 | if (evlist && perf_evlist__add_dummy(evlist)) { | |
78 | perf_evlist__delete(evlist); | |
79 | evlist = NULL; | |
80 | } | |
81 | ||
82 | return evlist; | |
83 | } | |
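/*
 * Illustrative sketch, not part of the original evlist.c: how the
 * constructors above are typically combined with the open/mmap helpers
 * defined further down in this file. Error handling is reduced to a
 * single bail-out path and 'target' is assumed to have been filled in
 * by the caller (e.g. from command line options).
 */
static int evlist_lifecycle_example(struct target *target)
{
	struct perf_evlist *evlist = perf_evlist__new_default();
	int err = -ENOMEM;

	if (evlist == NULL)
		return err;

	err = perf_evlist__create_maps(evlist, target);
	if (err < 0)
		goto out_delete;

	err = perf_evlist__open(evlist);
	if (err < 0)
		goto out_delete;

	err = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (err < 0)
		goto out_delete;

	perf_evlist__enable(evlist);
	/* ... consume events, see perf_evlist__mmap_read() below ... */
	perf_evlist__disable(evlist);
out_delete:
	perf_evlist__delete(evlist);	/* also munmaps and closes the events */
	return err;
}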
84 | ||
75562573 AH |
85 | /** |
86 | * perf_evlist__set_id_pos - set the positions of event ids. | |
87 | * @evlist: selected event list | |
88 | * | |
89 | * Events with compatible sample types all have the same id_pos | |
90 | * and is_pos. For convenience, put a copy on evlist. | |
91 | */ | |
92 | void perf_evlist__set_id_pos(struct perf_evlist *evlist) | |
93 | { | |
94 | struct perf_evsel *first = perf_evlist__first(evlist); | |
95 | ||
96 | evlist->id_pos = first->id_pos; | |
97 | evlist->is_pos = first->is_pos; | |
98 | } | |
99 | ||
733cd2fe AH |
100 | static void perf_evlist__update_id_pos(struct perf_evlist *evlist) |
101 | { | |
102 | struct perf_evsel *evsel; | |
103 | ||
e5cadb93 | 104 | evlist__for_each_entry(evlist, evsel) |
733cd2fe AH |
105 | perf_evsel__calc_id_pos(evsel); |
106 | ||
107 | perf_evlist__set_id_pos(evlist); | |
108 | } | |
109 | ||
361c99a6 ACM |
110 | static void perf_evlist__purge(struct perf_evlist *evlist) |
111 | { | |
112 | struct perf_evsel *pos, *n; | |
113 | ||
e5cadb93 | 114 | evlist__for_each_entry_safe(evlist, n, pos) { |
361c99a6 | 115 | list_del_init(&pos->node); |
d49e4695 | 116 | pos->evlist = NULL; |
361c99a6 ACM |
117 | perf_evsel__delete(pos); |
118 | } | |
119 | ||
120 | evlist->nr_entries = 0; | |
121 | } | |
122 | ||
ef1d1af2 | 123 | void perf_evlist__exit(struct perf_evlist *evlist) |
361c99a6 | 124 | { |
04662523 | 125 | zfree(&evlist->mmap); |
b2cb615d | 126 | zfree(&evlist->backward_mmap); |
1b85337d | 127 | fdarray__exit(&evlist->pollfd); |
ef1d1af2 ACM |
128 | } |
129 | ||
130 | void perf_evlist__delete(struct perf_evlist *evlist) | |
131 | { | |
0b04b3dc ACM |
132 | if (evlist == NULL) |
133 | return; | |
134 | ||
983874d1 | 135 | perf_evlist__munmap(evlist); |
f26e1c7c | 136 | perf_evlist__close(evlist); |
f30a79b0 | 137 | cpu_map__put(evlist->cpus); |
186fbb74 | 138 | thread_map__put(evlist->threads); |
03ad9747 ACM |
139 | evlist->cpus = NULL; |
140 | evlist->threads = NULL; | |
ef1d1af2 ACM |
141 | perf_evlist__purge(evlist); |
142 | perf_evlist__exit(evlist); | |
361c99a6 ACM |
143 | free(evlist); |
144 | } | |
145 | ||
adc0c3e8 AH |
146 | static void __perf_evlist__propagate_maps(struct perf_evlist *evlist, |
147 | struct perf_evsel *evsel) | |
148 | { | |
149 | /* | |
150 | * We already have cpus for evsel (via PMU sysfs) so | |
151 | * keep it, if there's no target cpu list defined. | |
152 | */ | |
153 | if (!evsel->own_cpus || evlist->has_user_cpus) { | |
154 | cpu_map__put(evsel->cpus); | |
155 | evsel->cpus = cpu_map__get(evlist->cpus); | |
156 | } else if (evsel->cpus != evsel->own_cpus) { | |
157 | cpu_map__put(evsel->cpus); | |
158 | evsel->cpus = cpu_map__get(evsel->own_cpus); | |
159 | } | |
160 | ||
161 | thread_map__put(evsel->threads); | |
162 | evsel->threads = thread_map__get(evlist->threads); | |
163 | } | |
164 | ||
165 | static void perf_evlist__propagate_maps(struct perf_evlist *evlist) | |
166 | { | |
167 | struct perf_evsel *evsel; | |
168 | ||
e5cadb93 | 169 | evlist__for_each_entry(evlist, evsel) |
adc0c3e8 AH |
170 | __perf_evlist__propagate_maps(evlist, evsel); |
171 | } | |
172 | ||
361c99a6 ACM |
173 | void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) |
174 | { | |
d49e4695 | 175 | entry->evlist = evlist; |
361c99a6 | 176 | list_add_tail(&entry->node, &evlist->entries); |
ef503831 | 177 | entry->idx = evlist->nr_entries; |
60b0896c | 178 | entry->tracking = !entry->idx; |
ef503831 | 179 | |
75562573 AH |
180 | if (!evlist->nr_entries++) |
181 | perf_evlist__set_id_pos(evlist); | |
44c42d71 AH |
182 | |
183 | __perf_evlist__propagate_maps(evlist, entry); | |
361c99a6 ACM |
184 | } |
185 | ||
4768230a AH |
186 | void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel) |
187 | { | |
188 | evsel->evlist = NULL; | |
189 | list_del_init(&evsel->node); | |
190 | evlist->nr_entries -= 1; | |
191 | } | |
192 | ||
0529bc1f | 193 | void perf_evlist__splice_list_tail(struct perf_evlist *evlist, |
f114d6ef | 194 | struct list_head *list) |
50d08e47 | 195 | { |
f114d6ef | 196 | struct perf_evsel *evsel, *temp; |
75562573 | 197 | |
e5cadb93 | 198 | __evlist__for_each_entry_safe(list, temp, evsel) { |
f114d6ef AH |
199 | list_del_init(&evsel->node); |
200 | perf_evlist__add(evlist, evsel); | |
201 | } | |
50d08e47 ACM |
202 | } |
203 | ||
63dab225 ACM |
204 | void __perf_evlist__set_leader(struct list_head *list) |
205 | { | |
206 | struct perf_evsel *evsel, *leader; | |
207 | ||
208 | leader = list_entry(list->next, struct perf_evsel, node); | |
97f63e4a NK |
209 | evsel = list_entry(list->prev, struct perf_evsel, node); |
210 | ||
211 | leader->nr_members = evsel->idx - leader->idx + 1; | |
63dab225 | 212 | |
e5cadb93 | 213 | __evlist__for_each_entry(list, evsel) { |
74b2133d | 214 | evsel->leader = leader; |
63dab225 ACM |
215 | } |
216 | } | |
217 | ||
218 | void perf_evlist__set_leader(struct perf_evlist *evlist) | |
6a4bb04c | 219 | { |
97f63e4a NK |
220 | if (evlist->nr_entries) { |
221 | evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0; | |
63dab225 | 222 | __perf_evlist__set_leader(&evlist->entries); |
97f63e4a | 223 | } |
6a4bb04c JO |
224 | } |
225 | ||
45cf6c33 | 226 | void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr) |
7f8d1ade ACM |
227 | { |
228 | attr->precise_ip = 3; | |
229 | ||
230 | while (attr->precise_ip != 0) { | |
231 | int fd = sys_perf_event_open(attr, 0, -1, -1, 0); | |
232 | if (fd != -1) { | |
233 | close(fd); | |
234 | break; | |
235 | } | |
236 | --attr->precise_ip; | |
237 | } | |
238 | } | |
239 | ||
361c99a6 ACM |
240 | int perf_evlist__add_default(struct perf_evlist *evlist) |
241 | { | |
7c48dcfd | 242 | struct perf_evsel *evsel = perf_evsel__new_cycles(); |
1aed2671 | 243 | |
361c99a6 | 244 | if (evsel == NULL) |
7c48dcfd | 245 | return -ENOMEM; |
361c99a6 ACM |
246 | |
247 | perf_evlist__add(evlist, evsel); | |
248 | return 0; | |
249 | } | |
5c581041 | 250 | |
5bae0250 ACM |
251 | int perf_evlist__add_dummy(struct perf_evlist *evlist) |
252 | { | |
253 | struct perf_event_attr attr = { | |
254 | .type = PERF_TYPE_SOFTWARE, | |
255 | .config = PERF_COUNT_SW_DUMMY, | |
256 | .size = sizeof(attr), /* to capture ABI version */ | |
257 | }; | |
258 | struct perf_evsel *evsel = perf_evsel__new(&attr); | |
259 | ||
260 | if (evsel == NULL) | |
261 | return -ENOMEM; | |
262 | ||
263 | perf_evlist__add(evlist, evsel); | |
264 | return 0; | |
265 | } | |
266 | ||
e60fc847 ACM |
267 | static int perf_evlist__add_attrs(struct perf_evlist *evlist, |
268 | struct perf_event_attr *attrs, size_t nr_attrs) | |
50d08e47 ACM |
269 | { |
270 | struct perf_evsel *evsel, *n; | |
271 | LIST_HEAD(head); | |
272 | size_t i; | |
273 | ||
274 | for (i = 0; i < nr_attrs; i++) { | |
ef503831 | 275 | evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i); |
50d08e47 ACM |
276 | if (evsel == NULL) |
277 | goto out_delete_partial_list; | |
278 | list_add_tail(&evsel->node, &head); | |
279 | } | |
280 | ||
f114d6ef | 281 | perf_evlist__splice_list_tail(evlist, &head); |
50d08e47 ACM |
282 | |
283 | return 0; | |
284 | ||
285 | out_delete_partial_list: | |
e5cadb93 | 286 | __evlist__for_each_entry_safe(&head, n, evsel) |
50d08e47 ACM |
287 | perf_evsel__delete(evsel); |
288 | return -1; | |
289 | } | |
290 | ||
79695e1b ACM |
291 | int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, |
292 | struct perf_event_attr *attrs, size_t nr_attrs) | |
293 | { | |
294 | size_t i; | |
295 | ||
296 | for (i = 0; i < nr_attrs; i++) | |
297 | event_attr_init(attrs + i); | |
298 | ||
299 | return perf_evlist__add_attrs(evlist, attrs, nr_attrs); | |
300 | } | |
301 | ||
da378962 ACM |
302 | struct perf_evsel * |
303 | perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id) | |
ee29be62 ACM |
304 | { |
305 | struct perf_evsel *evsel; | |
306 | ||
e5cadb93 | 307 | evlist__for_each_entry(evlist, evsel) { |
ee29be62 ACM |
308 | if (evsel->attr.type == PERF_TYPE_TRACEPOINT && |
309 | (int)evsel->attr.config == id) | |
310 | return evsel; | |
311 | } | |
312 | ||
313 | return NULL; | |
314 | } | |
315 | ||
a2f2804a DA |
316 | struct perf_evsel * |
317 | perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist, | |
318 | const char *name) | |
319 | { | |
320 | struct perf_evsel *evsel; | |
321 | ||
e5cadb93 | 322 | evlist__for_each_entry(evlist, evsel) { |
a2f2804a DA |
323 | if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) && |
324 | (strcmp(evsel->name, name) == 0)) | |
325 | return evsel; | |
326 | } | |
327 | ||
328 | return NULL; | |
329 | } | |
330 | ||
39876e7d ACM |
331 | int perf_evlist__add_newtp(struct perf_evlist *evlist, |
332 | const char *sys, const char *name, void *handler) | |
333 | { | |
ef503831 | 334 | struct perf_evsel *evsel = perf_evsel__newtp(sys, name); |
39876e7d | 335 | |
8dd2a131 | 336 | if (IS_ERR(evsel)) |
39876e7d ACM |
337 | return -1; |
338 | ||
744a9719 | 339 | evsel->handler = handler; |
39876e7d ACM |
340 | perf_evlist__add(evlist, evsel); |
341 | return 0; | |
342 | } | |
343 | ||
bf8e8f4b AH |
344 | static int perf_evlist__nr_threads(struct perf_evlist *evlist, |
345 | struct perf_evsel *evsel) | |
346 | { | |
347 | if (evsel->system_wide) | |
348 | return 1; | |
349 | else | |
350 | return thread_map__nr(evlist->threads); | |
351 | } | |
352 | ||
4152ab37 ACM |
353 | void perf_evlist__disable(struct perf_evlist *evlist) |
354 | { | |
4152ab37 | 355 | struct perf_evsel *pos; |
3e27c920 | 356 | |
e5cadb93 | 357 | evlist__for_each_entry(evlist, pos) { |
3e27c920 JO |
358 | if (!perf_evsel__is_group_leader(pos) || !pos->fd) |
359 | continue; | |
360 | perf_evsel__disable(pos); | |
4152ab37 | 361 | } |
2b56bcfb ACM |
362 | |
363 | evlist->enabled = false; | |
4152ab37 ACM |
364 | } |
365 | ||
764e16a3 DA |
366 | void perf_evlist__enable(struct perf_evlist *evlist) |
367 | { | |
764e16a3 | 368 | struct perf_evsel *pos; |
3e27c920 | 369 | |
e5cadb93 | 370 | evlist__for_each_entry(evlist, pos) { |
3e27c920 JO |
371 | if (!perf_evsel__is_group_leader(pos) || !pos->fd) |
372 | continue; | |
373 | perf_evsel__enable(pos); | |
764e16a3 | 374 | } |
2b56bcfb ACM |
375 | |
376 | evlist->enabled = true; | |
377 | } | |
378 | ||
379 | void perf_evlist__toggle_enable(struct perf_evlist *evlist) | |
380 | { | |
381 | (evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist); | |
764e16a3 DA |
382 | } |
383 | ||
1c65056c AH |
384 | static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist, |
385 | struct perf_evsel *evsel, int cpu) | |
386 | { | |
387 | int thread, err; | |
388 | int nr_threads = perf_evlist__nr_threads(evlist, evsel); | |
389 | ||
390 | if (!evsel->fd) | |
391 | return -EINVAL; | |
392 | ||
393 | for (thread = 0; thread < nr_threads; thread++) { | |
394 | err = ioctl(FD(evsel, cpu, thread), | |
395 | PERF_EVENT_IOC_ENABLE, 0); | |
396 | if (err) | |
397 | return err; | |
398 | } | |
399 | return 0; | |
400 | } | |
401 | ||
402 | static int perf_evlist__enable_event_thread(struct perf_evlist *evlist, | |
403 | struct perf_evsel *evsel, | |
404 | int thread) | |
405 | { | |
406 | int cpu, err; | |
407 | int nr_cpus = cpu_map__nr(evlist->cpus); | |
408 | ||
409 | if (!evsel->fd) | |
410 | return -EINVAL; | |
411 | ||
412 | for (cpu = 0; cpu < nr_cpus; cpu++) { | |
413 | err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0); | |
414 | if (err) | |
415 | return err; | |
416 | } | |
417 | return 0; | |
418 | } | |
419 | ||
420 | int perf_evlist__enable_event_idx(struct perf_evlist *evlist, | |
421 | struct perf_evsel *evsel, int idx) | |
422 | { | |
423 | bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus); | |
424 | ||
425 | if (per_cpu_mmaps) | |
426 | return perf_evlist__enable_event_cpu(evlist, evsel, idx); | |
427 | else | |
428 | return perf_evlist__enable_event_thread(evlist, evsel, idx); | |
429 | } | |
430 | ||
ad6765dd | 431 | int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) |
5c581041 | 432 | { |
b3a319d5 NK |
433 | int nr_cpus = cpu_map__nr(evlist->cpus); |
434 | int nr_threads = thread_map__nr(evlist->threads); | |
bf8e8f4b AH |
435 | int nfds = 0; |
436 | struct perf_evsel *evsel; | |
437 | ||
e5cadb93 | 438 | evlist__for_each_entry(evlist, evsel) { |
bf8e8f4b AH |
439 | if (evsel->system_wide) |
440 | nfds += nr_cpus; | |
441 | else | |
442 | nfds += nr_cpus * nr_threads; | |
443 | } | |
444 | ||
1b85337d ACM |
445 | if (fdarray__available_entries(&evlist->pollfd) < nfds && |
446 | fdarray__grow(&evlist->pollfd, nfds) < 0) | |
ad6765dd ACM |
447 | return -ENOMEM; |
448 | ||
449 | return 0; | |
5c581041 | 450 | } |
70082dd9 | 451 | |
4876075b WN |
452 | static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, |
453 | struct perf_mmap *map, short revent) | |
e4b356b5 | 454 | { |
f3058a1c | 455 | int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP); |
e4b356b5 ACM |
456 | /* |
457 | * Save the idx so that when we filter out POLLHUP'ed fds we can | |
458 | * close the associated evlist->mmap[] entry. | |
459 | */ | |
460 | if (pos >= 0) { | |
4876075b | 461 | evlist->pollfd.priv[pos].ptr = map; |
e4b356b5 ACM |
462 | |
463 | fcntl(fd, F_SETFL, O_NONBLOCK); | |
464 | } | |
465 | ||
466 | return pos; | |
467 | } | |
468 | ||
ad6765dd | 469 | int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd) |
70082dd9 | 470 | { |
4876075b | 471 | return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN); |
e4b356b5 ACM |
472 | } |
473 | ||
258e4bfc WN |
474 | static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd, |
475 | void *arg __maybe_unused) | |
e4b356b5 | 476 | { |
4876075b | 477 | struct perf_mmap *map = fda->priv[fd].ptr; |
1b85337d | 478 | |
4876075b WN |
479 | if (map) |
480 | perf_mmap__put(map); | |
70082dd9 | 481 | } |
70db7533 | 482 | |
1ddec7f0 ACM |
483 | int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask) |
484 | { | |
e4b356b5 | 485 | return fdarray__filter(&evlist->pollfd, revents_and_mask, |
258e4bfc | 486 | perf_evlist__munmap_filtered, NULL); |
1ddec7f0 ACM |
487 | } |
488 | ||
f66a889d ACM |
489 | int perf_evlist__poll(struct perf_evlist *evlist, int timeout) |
490 | { | |
1b85337d | 491 | return fdarray__poll(&evlist->pollfd, timeout); |
f66a889d ACM |
492 | } |
493 | ||
a91e5431 ACM |
494 | static void perf_evlist__id_hash(struct perf_evlist *evlist, |
495 | struct perf_evsel *evsel, | |
496 | int cpu, int thread, u64 id) | |
3d3b5e95 ACM |
497 | { |
498 | int hash; | |
499 | struct perf_sample_id *sid = SID(evsel, cpu, thread); | |
500 | ||
501 | sid->id = id; | |
502 | sid->evsel = evsel; | |
503 | hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS); | |
504 | hlist_add_head(&sid->node, &evlist->heads[hash]); | |
505 | } | |
506 | ||
a91e5431 ACM |
507 | void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, |
508 | int cpu, int thread, u64 id) | |
509 | { | |
510 | perf_evlist__id_hash(evlist, evsel, cpu, thread, id); | |
511 | evsel->id[evsel->ids++] = id; | |
512 | } | |
513 | ||
1c59612d JO |
514 | int perf_evlist__id_add_fd(struct perf_evlist *evlist, |
515 | struct perf_evsel *evsel, | |
516 | int cpu, int thread, int fd) | |
f8a95309 | 517 | { |
f8a95309 | 518 | u64 read_data[4] = { 0, }; |
3d3b5e95 | 519 | int id_idx = 1; /* The first entry is the counter value */ |
e2b5abe0 JO |
520 | u64 id; |
521 | int ret; | |
522 | ||
523 | ret = ioctl(fd, PERF_EVENT_IOC_ID, &id); | |
524 | if (!ret) | |
525 | goto add; | |
526 | ||
527 | if (errno != ENOTTY) | |
528 | return -1; | |
529 | ||
530 | /* Legacy way to get the event id... All hail to old kernels! */ | |
f8a95309 | 531 | |
c4861afe JO |
532 | /* |
533 | * This way does not work with group format read, so bail | |
534 | * out in that case. | |
535 | */ | |
536 | if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP) | |
537 | return -1; | |
538 | ||
f8a95309 ACM |
539 | if (!(evsel->attr.read_format & PERF_FORMAT_ID) || |
540 | read(fd, &read_data, sizeof(read_data)) == -1) | |
541 | return -1; | |
542 | ||
543 | if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | |
544 | ++id_idx; | |
545 | if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | |
546 | ++id_idx; | |
547 | ||
e2b5abe0 JO |
548 | id = read_data[id_idx]; |
549 | ||
550 | add: | |
551 | perf_evlist__id_add(evlist, evsel, cpu, thread, id); | |
f8a95309 ACM |
552 | return 0; |
553 | } | |
554 | ||
3c659eed AH |
555 | static void perf_evlist__set_sid_idx(struct perf_evlist *evlist, |
556 | struct perf_evsel *evsel, int idx, int cpu, | |
557 | int thread) | |
558 | { | |
559 | struct perf_sample_id *sid = SID(evsel, cpu, thread); | |
560 | sid->idx = idx; | |
561 | if (evlist->cpus && cpu >= 0) | |
562 | sid->cpu = evlist->cpus->map[cpu]; | |
563 | else | |
564 | sid->cpu = -1; | |
565 | if (!evsel->system_wide && evlist->threads && thread >= 0) | |
e13798c7 | 566 | sid->tid = thread_map__pid(evlist->threads, thread); |
3c659eed AH |
567 | else |
568 | sid->tid = -1; | |
569 | } | |
570 | ||
932a3594 | 571 | struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id) |
70db7533 ACM |
572 | { |
573 | struct hlist_head *head; | |
70db7533 ACM |
574 | struct perf_sample_id *sid; |
575 | int hash; | |
576 | ||
70db7533 ACM |
577 | hash = hash_64(id, PERF_EVLIST__HLIST_BITS); |
578 | head = &evlist->heads[hash]; | |
579 | ||
b67bfe0d | 580 | hlist_for_each_entry(sid, head, node) |
70db7533 | 581 | if (sid->id == id) |
932a3594 JO |
582 | return sid; |
583 | ||
584 | return NULL; | |
585 | } | |
586 | ||
587 | struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) | |
588 | { | |
589 | struct perf_sample_id *sid; | |
590 | ||
05169df5 | 591 | if (evlist->nr_entries == 1 || !id) |
932a3594 JO |
592 | return perf_evlist__first(evlist); |
593 | ||
594 | sid = perf_evlist__id2sid(evlist, id); | |
595 | if (sid) | |
596 | return sid->evsel; | |
30e68bcc NK |
597 | |
598 | if (!perf_evlist__sample_id_all(evlist)) | |
0c21f736 | 599 | return perf_evlist__first(evlist); |
30e68bcc | 600 | |
70db7533 ACM |
601 | return NULL; |
602 | } | |
04391deb | 603 | |
dddcf6ab AH |
604 | struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist, |
605 | u64 id) | |
606 | { | |
607 | struct perf_sample_id *sid; | |
608 | ||
609 | if (!id) | |
610 | return NULL; | |
611 | ||
612 | sid = perf_evlist__id2sid(evlist, id); | |
613 | if (sid) | |
614 | return sid->evsel; | |
615 | ||
616 | return NULL; | |
617 | } | |
618 | ||
75562573 AH |
619 | static int perf_evlist__event2id(struct perf_evlist *evlist, |
620 | union perf_event *event, u64 *id) | |
621 | { | |
622 | const u64 *array = event->sample.array; | |
623 | ssize_t n; | |
624 | ||
625 | n = (event->header.size - sizeof(event->header)) >> 3; | |
626 | ||
627 | if (event->header.type == PERF_RECORD_SAMPLE) { | |
628 | if (evlist->id_pos >= n) | |
629 | return -1; | |
630 | *id = array[evlist->id_pos]; | |
631 | } else { | |
632 | if (evlist->is_pos > n) | |
633 | return -1; | |
634 | n -= evlist->is_pos; | |
635 | *id = array[n]; | |
636 | } | |
637 | return 0; | |
638 | } | |
639 | ||
7cb5c5ac JO |
640 | struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist, |
641 | union perf_event *event) | |
75562573 | 642 | { |
98be6966 | 643 | struct perf_evsel *first = perf_evlist__first(evlist); |
75562573 AH |
644 | struct hlist_head *head; |
645 | struct perf_sample_id *sid; | |
646 | int hash; | |
647 | u64 id; | |
648 | ||
649 | if (evlist->nr_entries == 1) | |
98be6966 AH |
650 | return first; |
651 | ||
652 | if (!first->attr.sample_id_all && | |
653 | event->header.type != PERF_RECORD_SAMPLE) | |
654 | return first; | |
75562573 AH |
655 | |
656 | if (perf_evlist__event2id(evlist, event, &id)) | |
657 | return NULL; | |
658 | ||
659 | /* Synthesized events have an id of zero */ | |
660 | if (!id) | |
98be6966 | 661 | return first; |
75562573 AH |
662 | |
663 | hash = hash_64(id, PERF_EVLIST__HLIST_BITS); | |
664 | head = &evlist->heads[hash]; | |
665 | ||
666 | hlist_for_each_entry(sid, head, node) { | |
667 | if (sid->id == id) | |
668 | return sid->evsel; | |
669 | } | |
670 | return NULL; | |
671 | } | |
672 | ||
65aea233 WN |
673 | static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value) |
674 | { | |
675 | int i; | |
676 | ||
078c3386 WN |
677 | if (!evlist->backward_mmap) |
678 | return 0; | |
679 | ||
65aea233 | 680 | for (i = 0; i < evlist->nr_mmaps; i++) { |
078c3386 | 681 | int fd = evlist->backward_mmap[i].fd; |
65aea233 WN |
682 | int err; |
683 | ||
684 | if (fd < 0) | |
685 | continue; | |
686 | err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0); | |
687 | if (err) | |
688 | return err; | |
689 | } | |
690 | return 0; | |
691 | } | |
692 | ||
f6cdff83 | 693 | static int perf_evlist__pause(struct perf_evlist *evlist) |
65aea233 WN |
694 | { |
695 | return perf_evlist__set_paused(evlist, true); | |
696 | } | |
697 | ||
f6cdff83 | 698 | static int perf_evlist__resume(struct perf_evlist *evlist) |
65aea233 WN |
699 | { |
700 | return perf_evlist__set_paused(evlist, false); | |
701 | } | |
702 | ||
b6b85dad | 703 | /* When check_messup is true, 'end' must point to a good entry */
0f4ccd11 | 704 | static union perf_event * |
b6b85dad WN |
705 | perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start, |
706 | u64 end, u64 *prev) | |
04391deb | 707 | { |
04391deb | 708 | unsigned char *data = md->base + page_size; |
8115d60c | 709 | union perf_event *event = NULL; |
b6b85dad | 710 | int diff = end - start; |
04391deb | 711 | |
b6b85dad | 712 | if (check_messup) { |
04391deb | 713 | /* |
7bb41152 ACM |
714 | * If we're further behind than half the buffer, there's a chance |
715 | * the writer will bite our tail and mess up the samples under us. | |
716 | * | |
b6b85dad | 717 | * If we somehow ended up ahead of the 'end', we got messed up. |
7bb41152 | 718 | * |
b6b85dad | 719 | * In either case, truncate and restart at 'end'. |
04391deb | 720 | */ |
7bb41152 ACM |
721 | if (diff > md->mask / 2 || diff < 0) { |
722 | fprintf(stderr, "WARNING: failed to keep up with mmap data.\n"); | |
723 | ||
724 | /* | |
b6b85dad | 725 | * 'end' points to a known good entry, start there. |
7bb41152 | 726 | */ |
b6b85dad | 727 | start = end; |
b04b7023 | 728 | diff = 0; |
7bb41152 | 729 | } |
04391deb ACM |
730 | } |
731 | ||
b04b7023 | 732 | if (diff >= (int)sizeof(event->header)) { |
04391deb ACM |
733 | size_t size; |
734 | ||
b6b85dad | 735 | event = (union perf_event *)&data[start & md->mask]; |
04391deb ACM |
736 | size = event->header.size; |
737 | ||
b04b7023 WN |
738 | if (size < sizeof(event->header) || diff < (int)size) { |
739 | event = NULL; | |
740 | goto broken_event; | |
741 | } | |
742 | ||
04391deb ACM |
743 | /* |
744 | * Event straddles the mmap boundary -- header should always | |
745 | * be inside due to u64 alignment of output. | |
746 | */ | |
b6b85dad WN |
747 | if ((start & md->mask) + size != ((start + size) & md->mask)) { |
748 | unsigned int offset = start; | |
04391deb | 749 | unsigned int len = min(sizeof(*event), size), cpy; |
a65cb4b9 | 750 | void *dst = md->event_copy; |
04391deb ACM |
751 | |
752 | do { | |
753 | cpy = min(md->mask + 1 - (offset & md->mask), len); | |
754 | memcpy(dst, &data[offset & md->mask], cpy); | |
755 | offset += cpy; | |
756 | dst += cpy; | |
757 | len -= cpy; | |
758 | } while (len); | |
759 | ||
a65cb4b9 | 760 | event = (union perf_event *) md->event_copy; |
04391deb ACM |
761 | } |
762 | ||
b6b85dad | 763 | start += size; |
04391deb ACM |
764 | } |
765 | ||
b04b7023 | 766 | broken_event: |
0f4ccd11 | 767 | if (prev) |
b6b85dad | 768 | *prev = start; |
7bb41152 | 769 | |
04391deb ACM |
770 | return event; |
771 | } | |
f8a95309 | 772 | |
8db6d6b1 | 773 | union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup) |
0f4ccd11 | 774 | { |
0f4ccd11 WN |
775 | u64 head; |
776 | u64 old = md->prev; | |
777 | ||
778 | /* | |
779 | * Check if event was unmapped due to a POLLHUP/POLLERR. | |
780 | */ | |
781 | if (!atomic_read(&md->refcnt)) | |
782 | return NULL; | |
783 | ||
784 | head = perf_mmap__read_head(md); | |
785 | ||
8db6d6b1 | 786 | return perf_mmap__read(md, check_messup, old, head, &md->prev); |
0f4ccd11 WN |
787 | } |
788 | ||
e24c7520 | 789 | union perf_event * |
8db6d6b1 | 790 | perf_mmap__read_backward(struct perf_mmap *md) |
e24c7520 | 791 | { |
e24c7520 WN |
792 | u64 head, end; |
793 | u64 start = md->prev; | |
794 | ||
795 | /* | |
796 | * Check if event was unmapped due to a POLLHUP/POLLERR. | |
797 | */ | |
798 | if (!atomic_read(&md->refcnt)) | |
799 | return NULL; | |
800 | ||
801 | head = perf_mmap__read_head(md); | |
802 | if (!head) | |
803 | return NULL; | |
804 | ||
805 | /* | |
806 | * The 'head' pointer starts at 0. The kernel subtracts sizeof(record) | |
807 | * from it each time it writes a record, so in fact 'head' is | |
808 | * negative. The 'end' pointer is built manually by adding the size of | |
809 | * the ring buffer to the 'head' pointer, meaning the valid data we can | |
810 | * read is the whole ring buffer. If 'end' is positive, the ring | |
811 | * buffer has not been fully filled, so we must adjust 'end' to 0. | |
812 | * | |
813 | * However, since both 'head' and 'end' are unsigned, we can't | |
814 | * simply compare 'end' against 0. Here we compare '-head' with | |
815 | * the size of the ring buffer, where -head is the number of bytes | |
816 | * the kernel has written to the ring buffer. | |
817 | */ | |
818 | if (-head < (u64)(md->mask + 1)) | |
819 | end = 0; | |
820 | else | |
821 | end = head + md->mask + 1; | |
822 | ||
823 | return perf_mmap__read(md, false, start, end, &md->prev); | |
824 | } | |
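/*
 * Illustrative note, not part of the original source: with a 4096-byte
 * data area (md->mask + 1 == 4096), after the kernel has written a single
 * 24-byte record 'head' is (u64)-24, so -head == 24 < 4096 and 'end' is
 * set to 0; perf_mmap__read() then walks the 24 bytes between 'start'
 * (equal to 'head' after perf_mmap__read_catchup()) and 0. Once the
 * buffer has wrapped, -head >= 4096 and 'end' becomes head + 4096, so a
 * full buffer's worth of data is read starting at 'start'.
 */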
825 | ||
8db6d6b1 WN |
826 | union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx) |
827 | { | |
828 | struct perf_mmap *md = &evlist->mmap[idx]; | |
829 | ||
830 | /* | |
831 | * Checking for messup is required for a forward overwritable ring buffer: | |
832 | * the memory pointed to by md->prev can be overwritten in this case. | |
833 | * No need for a read-write ring buffer: the kernel stops outputting when | |
834 | * it hits md->prev (perf_mmap__consume()). | |
835 | */ | |
836 | return perf_mmap__read_forward(md, evlist->overwrite); | |
837 | } | |
838 | ||
839 | union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx) | |
840 | { | |
841 | struct perf_mmap *md = &evlist->mmap[idx]; | |
842 | ||
843 | /* | |
844 | * No need to check for messup in a backward ring buffer: | |
845 | * we can always read arbitrarily long data from a backward | |
846 | * ring buffer unless we forget to pause it before reading. | |
847 | */ | |
848 | return perf_mmap__read_backward(md); | |
849 | } | |
850 | ||
5a5ddeb6 WN |
851 | union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) |
852 | { | |
a0c6f451 | 853 | return perf_evlist__mmap_read_forward(evlist, idx); |
5a5ddeb6 WN |
854 | } |
855 | ||
8db6d6b1 | 856 | void perf_mmap__read_catchup(struct perf_mmap *md) |
e24c7520 | 857 | { |
e24c7520 WN |
858 | u64 head; |
859 | ||
860 | if (!atomic_read(&md->refcnt)) | |
861 | return; | |
862 | ||
863 | head = perf_mmap__read_head(md); | |
864 | md->prev = head; | |
865 | } | |
866 | ||
8db6d6b1 WN |
867 | void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx) |
868 | { | |
869 | perf_mmap__read_catchup(&evlist->mmap[idx]); | |
870 | } | |
871 | ||
82396986 ACM |
872 | static bool perf_mmap__empty(struct perf_mmap *md) |
873 | { | |
b72e74d1 | 874 | return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base; |
82396986 ACM |
875 | } |
876 | ||
8db6d6b1 | 877 | static void perf_mmap__get(struct perf_mmap *map) |
82396986 | 878 | { |
8db6d6b1 | 879 | atomic_inc(&map->refcnt); |
82396986 ACM |
880 | } |
881 | ||
8db6d6b1 | 882 | static void perf_mmap__put(struct perf_mmap *md) |
82396986 | 883 | { |
e10e4ef6 | 884 | BUG_ON(md->base && atomic_read(&md->refcnt) == 0); |
82396986 | 885 | |
e10e4ef6 | 886 | if (atomic_dec_and_test(&md->refcnt)) |
8db6d6b1 | 887 | perf_mmap__munmap(md); |
82396986 ACM |
888 | } |
889 | ||
8db6d6b1 WN |
890 | void perf_mmap__consume(struct perf_mmap *md, bool overwrite) |
891 | { | |
892 | if (!overwrite) { | |
7b8283b5 | 893 | u64 old = md->prev; |
8e50d384 ZZ |
894 | |
895 | perf_mmap__write_tail(md, old); | |
896 | } | |
82396986 | 897 | |
7143849a | 898 | if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md)) |
8db6d6b1 WN |
899 | perf_mmap__put(md); |
900 | } | |
901 | ||
902 | void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx) | |
903 | { | |
904 | perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite); | |
8e50d384 ZZ |
905 | } |
906 | ||
718c602d AH |
907 | int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused, |
908 | struct auxtrace_mmap_params *mp __maybe_unused, | |
909 | void *userpg __maybe_unused, | |
910 | int fd __maybe_unused) | |
911 | { | |
912 | return 0; | |
913 | } | |
914 | ||
915 | void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused) | |
916 | { | |
917 | } | |
918 | ||
919 | void __weak auxtrace_mmap_params__init( | |
920 | struct auxtrace_mmap_params *mp __maybe_unused, | |
921 | off_t auxtrace_offset __maybe_unused, | |
922 | unsigned int auxtrace_pages __maybe_unused, | |
923 | bool auxtrace_overwrite __maybe_unused) | |
924 | { | |
925 | } | |
926 | ||
927 | void __weak auxtrace_mmap_params__set_idx( | |
928 | struct auxtrace_mmap_params *mp __maybe_unused, | |
929 | struct perf_evlist *evlist __maybe_unused, | |
930 | int idx __maybe_unused, | |
931 | bool per_cpu __maybe_unused) | |
932 | { | |
933 | } | |
934 | ||
8db6d6b1 | 935 | static void perf_mmap__munmap(struct perf_mmap *map) |
93edcbd9 | 936 | { |
8db6d6b1 WN |
937 | if (map->base != NULL) { |
938 | munmap(map->base, perf_mmap__mmap_len(map)); | |
939 | map->base = NULL; | |
940 | map->fd = -1; | |
941 | atomic_set(&map->refcnt, 0); | |
93edcbd9 | 942 | } |
8db6d6b1 WN |
943 | auxtrace_mmap__munmap(&map->auxtrace_mmap); |
944 | } | |
945 | ||
a1f72618 | 946 | static void perf_evlist__munmap_nofree(struct perf_evlist *evlist) |
f8a95309 | 947 | { |
aece948f | 948 | int i; |
f8a95309 | 949 | |
b2cb615d WN |
950 | if (evlist->mmap) |
951 | for (i = 0; i < evlist->nr_mmaps; i++) | |
952 | perf_mmap__munmap(&evlist->mmap[i]); | |
983874d1 | 953 | |
b2cb615d WN |
954 | if (evlist->backward_mmap) |
955 | for (i = 0; i < evlist->nr_mmaps; i++) | |
956 | perf_mmap__munmap(&evlist->backward_mmap[i]); | |
a1f72618 | 957 | } |
aece948f | 958 | |
a1f72618 WN |
959 | void perf_evlist__munmap(struct perf_evlist *evlist) |
960 | { | |
961 | perf_evlist__munmap_nofree(evlist); | |
04662523 | 962 | zfree(&evlist->mmap); |
b2cb615d | 963 | zfree(&evlist->backward_mmap); |
f8a95309 ACM |
964 | } |
965 | ||
8db6d6b1 | 966 | static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist) |
f8a95309 | 967 | { |
d4c6fb36 | 968 | int i; |
8db6d6b1 | 969 | struct perf_mmap *map; |
d4c6fb36 | 970 | |
a14bb7a6 | 971 | evlist->nr_mmaps = cpu_map__nr(evlist->cpus); |
ec1e7e43 | 972 | if (cpu_map__empty(evlist->cpus)) |
b3a319d5 | 973 | evlist->nr_mmaps = thread_map__nr(evlist->threads); |
8db6d6b1 WN |
974 | map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); |
975 | if (!map) | |
976 | return NULL; | |
946ae1d4 | 977 | |
d4c6fb36 | 978 | for (i = 0; i < evlist->nr_mmaps; i++) |
8db6d6b1 WN |
979 | map[i].fd = -1; |
980 | return map; | |
f8a95309 ACM |
981 | } |
982 | ||
a8a8f3eb AH |
983 | struct mmap_params { |
984 | int prot; | |
985 | int mask; | |
718c602d | 986 | struct auxtrace_mmap_params auxtrace_mp; |
a8a8f3eb AH |
987 | }; |
988 | ||
8db6d6b1 WN |
989 | static int perf_mmap__mmap(struct perf_mmap *map, |
990 | struct mmap_params *mp, int fd) | |
f8a95309 | 991 | { |
82396986 ACM |
992 | /* |
993 | * The last one will be done at perf_evlist__mmap_consume(), so that we | |
994 | * make sure we don't prevent tools from consuming every last event in | |
995 | * the ring buffer. | |
996 | * | |
997 | * I.e. we can get the POLLHUP meaning that the fd doesn't exist | |
998 | * anymore, but the last events for it are still in the ring buffer, | |
999 | * waiting to be consumed. | |
1000 | * | |
1001 | * Tools can choose to ignore this at their own discretion, but the | |
1002 | * evlist layer can't just drop it when filtering events in | |
1003 | * perf_evlist__filter_pollfd(). | |
1004 | */ | |
8db6d6b1 WN |
1005 | atomic_set(&map->refcnt, 2); |
1006 | map->prev = 0; | |
1007 | map->mask = mp->mask; | |
1008 | map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot, | |
1009 | MAP_SHARED, fd, 0); | |
1010 | if (map->base == MAP_FAILED) { | |
02635965 AH |
1011 | pr_debug2("failed to mmap perf event ring buffer, error %d\n", |
1012 | errno); | |
8db6d6b1 | 1013 | map->base = NULL; |
f8a95309 | 1014 | return -1; |
301b195d | 1015 | } |
8db6d6b1 | 1016 | map->fd = fd; |
ad6765dd | 1017 | |
8db6d6b1 WN |
1018 | if (auxtrace_mmap__mmap(&map->auxtrace_mmap, |
1019 | &mp->auxtrace_mp, map->base, fd)) | |
718c602d AH |
1020 | return -1; |
1021 | ||
f8a95309 ACM |
1022 | return 0; |
1023 | } | |
1024 | ||
f3058a1c WN |
1025 | static bool |
1026 | perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused, | |
1027 | struct perf_evsel *evsel) | |
1028 | { | |
32a951b4 | 1029 | if (evsel->attr.write_backward) |
f3058a1c WN |
1030 | return false; |
1031 | return true; | |
1032 | } | |
1033 | ||
04e21314 | 1034 | static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, |
a8a8f3eb | 1035 | struct mmap_params *mp, int cpu, |
078c3386 | 1036 | int thread, int *_output, int *_output_backward) |
aece948f ACM |
1037 | { |
1038 | struct perf_evsel *evsel; | |
f3058a1c | 1039 | int revent; |
04e21314 | 1040 | |
e5cadb93 | 1041 | evlist__for_each_entry(evlist, evsel) { |
078c3386 WN |
1042 | struct perf_mmap *maps = evlist->mmap; |
1043 | int *output = _output; | |
bf8e8f4b AH |
1044 | int fd; |
1045 | ||
078c3386 WN |
1046 | if (evsel->attr.write_backward) { |
1047 | output = _output_backward; | |
1048 | maps = evlist->backward_mmap; | |
1049 | ||
1050 | if (!maps) { | |
1051 | maps = perf_evlist__alloc_mmap(evlist); | |
1052 | if (!maps) | |
1053 | return -1; | |
1054 | evlist->backward_mmap = maps; | |
54cc54de WN |
1055 | if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY) |
1056 | perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING); | |
078c3386 WN |
1057 | } |
1058 | } | |
f3058a1c | 1059 | |
bf8e8f4b AH |
1060 | if (evsel->system_wide && thread) |
1061 | continue; | |
1062 | ||
1063 | fd = FD(evsel, cpu, thread); | |
04e21314 AH |
1064 | |
1065 | if (*output == -1) { | |
1066 | *output = fd; | |
078c3386 WN |
1067 | |
1068 | if (perf_mmap__mmap(&maps[idx], mp, *output) < 0) | |
04e21314 AH |
1069 | return -1; |
1070 | } else { | |
1071 | if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0) | |
1072 | return -1; | |
82396986 | 1073 | |
078c3386 | 1074 | perf_mmap__get(&maps[idx]); |
04e21314 AH |
1075 | } |
1076 | ||
f3058a1c WN |
1077 | revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0; |
1078 | ||
f90d194a AH |
1079 | /* |
1080 | * The system_wide flag causes a selected event to be opened | |
1081 | * always without a pid. Consequently it will never get a | |
1082 | * POLLHUP, but it is used for tracking in combination with | |
1083 | * other events, so it should not need to be polled anyway. | |
1084 | * Therefore don't add it for polling. | |
1085 | */ | |
1086 | if (!evsel->system_wide && | |
078c3386 WN |
1087 | __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) { |
1088 | perf_mmap__put(&maps[idx]); | |
ad6765dd | 1089 | return -1; |
82396986 | 1090 | } |
033fa713 | 1091 | |
3c659eed AH |
1092 | if (evsel->attr.read_format & PERF_FORMAT_ID) { |
1093 | if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread, | |
1094 | fd) < 0) | |
1095 | return -1; | |
1096 | perf_evlist__set_sid_idx(evlist, evsel, idx, cpu, | |
1097 | thread); | |
1098 | } | |
04e21314 AH |
1099 | } |
1100 | ||
1101 | return 0; | |
1102 | } | |
1103 | ||
a8a8f3eb AH |
1104 | static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, |
1105 | struct mmap_params *mp) | |
04e21314 | 1106 | { |
aece948f | 1107 | int cpu, thread; |
b3a319d5 NK |
1108 | int nr_cpus = cpu_map__nr(evlist->cpus); |
1109 | int nr_threads = thread_map__nr(evlist->threads); | |
aece948f | 1110 | |
e3e1a54f | 1111 | pr_debug2("perf event ring buffer mmapped per cpu\n"); |
b3a319d5 | 1112 | for (cpu = 0; cpu < nr_cpus; cpu++) { |
aece948f | 1113 | int output = -1; |
078c3386 | 1114 | int output_backward = -1; |
aece948f | 1115 | |
718c602d AH |
1116 | auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu, |
1117 | true); | |
1118 | ||
b3a319d5 | 1119 | for (thread = 0; thread < nr_threads; thread++) { |
a8a8f3eb | 1120 | if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu, |
078c3386 | 1121 | thread, &output, &output_backward)) |
04e21314 | 1122 | goto out_unmap; |
aece948f ACM |
1123 | } |
1124 | } | |
1125 | ||
1126 | return 0; | |
1127 | ||
1128 | out_unmap: | |
a1f72618 | 1129 | perf_evlist__munmap_nofree(evlist); |
aece948f ACM |
1130 | return -1; |
1131 | } | |
1132 | ||
a8a8f3eb AH |
1133 | static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, |
1134 | struct mmap_params *mp) | |
aece948f | 1135 | { |
aece948f | 1136 | int thread; |
b3a319d5 | 1137 | int nr_threads = thread_map__nr(evlist->threads); |
aece948f | 1138 | |
e3e1a54f | 1139 | pr_debug2("perf event ring buffer mmapped per thread\n"); |
b3a319d5 | 1140 | for (thread = 0; thread < nr_threads; thread++) { |
aece948f | 1141 | int output = -1; |
078c3386 | 1142 | int output_backward = -1; |
aece948f | 1143 | |
718c602d AH |
1144 | auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread, |
1145 | false); | |
1146 | ||
a8a8f3eb | 1147 | if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread, |
078c3386 | 1148 | &output, &output_backward)) |
04e21314 | 1149 | goto out_unmap; |
aece948f ACM |
1150 | } |
1151 | ||
1152 | return 0; | |
1153 | ||
1154 | out_unmap: | |
a1f72618 | 1155 | perf_evlist__munmap_nofree(evlist); |
aece948f ACM |
1156 | return -1; |
1157 | } | |
1158 | ||
f5e7150c | 1159 | unsigned long perf_event_mlock_kb_in_pages(void) |
994a1f78 | 1160 | { |
f5e7150c ACM |
1161 | unsigned long pages; |
1162 | int max; | |
8185e881 | 1163 | |
f5e7150c ACM |
1164 | if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) { |
1165 | /* | |
1166 | * Pick a once upon a time good value, i.e. things look | |
1167 | * strange since we can't read a sysctl value, but let's not | |
1168 | * die yet... | |
1169 | */ | |
1170 | max = 512; | |
1171 | } else { | |
1172 | max -= (page_size / 1024); | |
1173 | } | |
8185e881 | 1174 | |
f5e7150c ACM |
1175 | pages = (max * 1024) / page_size; |
1176 | if (!is_power_of_2(pages)) | |
1177 | pages = rounddown_pow_of_two(pages); | |
1178 | ||
1179 | return pages; | |
1180 | } | |
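/*
 * Illustrative note, not part of the original source: assuming 4 KiB
 * pages and the kernel's usual default of 516 for
 * kernel/perf_event_mlock_kb, 'max' becomes 516 - 4 = 512 KiB, i.e.
 * 512 * 1024 / 4096 = 128 pages -- already a power of two, so 128 is
 * returned. The 512 fallback above corresponds to this common case.
 */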
1181 | ||
1182 | static size_t perf_evlist__mmap_size(unsigned long pages) | |
1183 | { | |
1184 | if (pages == UINT_MAX) | |
1185 | pages = perf_event_mlock_kb_in_pages(); | |
1186 | else if (!is_power_of_2(pages)) | |
994a1f78 JO |
1187 | return 0; |
1188 | ||
1189 | return (pages + 1) * page_size; | |
1190 | } | |
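/*
 * Illustrative note, not part of the original source: for pages == 128
 * and 4 KiB pages this yields (128 + 1) * 4096 = 528384 bytes -- 128
 * pages of ring-buffer data plus one extra page for the
 * struct perf_event_mmap_page control header that leads every perf mmap.
 */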
1191 | ||
33c2dcfd DA |
1192 | static long parse_pages_arg(const char *str, unsigned long min, |
1193 | unsigned long max) | |
994a1f78 | 1194 | { |
2fbe4abe | 1195 | unsigned long pages, val; |
27050f53 JO |
1196 | static struct parse_tag tags[] = { |
1197 | { .tag = 'B', .mult = 1 }, | |
1198 | { .tag = 'K', .mult = 1 << 10 }, | |
1199 | { .tag = 'M', .mult = 1 << 20 }, | |
1200 | { .tag = 'G', .mult = 1 << 30 }, | |
1201 | { .tag = 0 }, | |
1202 | }; | |
994a1f78 | 1203 | |
8973504b | 1204 | if (str == NULL) |
33c2dcfd | 1205 | return -EINVAL; |
8973504b | 1206 | |
27050f53 | 1207 | val = parse_tag_value(str, tags); |
2fbe4abe | 1208 | if (val != (unsigned long) -1) { |
27050f53 JO |
1209 | /* we got file size value */ |
1210 | pages = PERF_ALIGN(val, page_size) / page_size; | |
27050f53 JO |
1211 | } else { |
1212 | /* we got pages count value */ | |
1213 | char *eptr; | |
1214 | pages = strtoul(str, &eptr, 10); | |
33c2dcfd DA |
1215 | if (*eptr != '\0') |
1216 | return -EINVAL; | |
994a1f78 JO |
1217 | } |
1218 | ||
2bcab6c1 | 1219 | if (pages == 0 && min == 0) { |
33c2dcfd | 1220 | /* leave number of pages at 0 */ |
1dbfa938 | 1221 | } else if (!is_power_of_2(pages)) { |
33c2dcfd | 1222 | /* round pages up to next power of 2 */ |
91529834 | 1223 | pages = roundup_pow_of_two(pages); |
1dbfa938 AH |
1224 | if (!pages) |
1225 | return -EINVAL; | |
9639837e DA |
1226 | pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n", |
1227 | pages * page_size, pages); | |
2fbe4abe AH |
1228 | } |
1229 | ||
33c2dcfd DA |
1230 | if (pages > max) |
1231 | return -EINVAL; | |
1232 | ||
1233 | return pages; | |
1234 | } | |
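/*
 * Illustrative note, not part of the original source: "512K" is treated
 * as a size by parse_tag_value() (524288 bytes, i.e. 128 pages with
 * 4 KiB pages), while a bare "100" is treated as a page count and
 * rounded up to the next power of two, 128, with a message about the
 * rounding.
 */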
1235 | ||
e9db1310 | 1236 | int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str) |
33c2dcfd | 1237 | { |
33c2dcfd DA |
1238 | unsigned long max = UINT_MAX; |
1239 | long pages; | |
1240 | ||
f5ae9c42 | 1241 | if (max > SIZE_MAX / page_size) |
33c2dcfd DA |
1242 | max = SIZE_MAX / page_size; |
1243 | ||
1244 | pages = parse_pages_arg(str, 1, max); | |
1245 | if (pages < 0) { | |
1246 | pr_err("Invalid argument for --mmap_pages/-m\n"); | |
994a1f78 JO |
1247 | return -1; |
1248 | } | |
1249 | ||
1250 | *mmap_pages = pages; | |
1251 | return 0; | |
1252 | } | |
1253 | ||
e9db1310 AH |
1254 | int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, |
1255 | int unset __maybe_unused) | |
1256 | { | |
1257 | return __perf_evlist__parse_mmap_pages(opt->value, str); | |
1258 | } | |
1259 | ||
c83fa7f2 | 1260 | /** |
718c602d | 1261 | * perf_evlist__mmap_ex - Create mmaps to receive events. |
c83fa7f2 AH |
1262 | * @evlist: list of events |
1263 | * @pages: map length in pages | |
1264 | * @overwrite: overwrite older events? | |
718c602d AH |
1265 | * @auxtrace_pages: auxtrace map length in pages | |
1266 | * @auxtrace_overwrite: overwrite older auxtrace data? | |
f8a95309 | 1267 | * |
c83fa7f2 AH |
1268 | * If @overwrite is %false the user needs to signal event consumption using |
1269 | * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this | |
1270 | * automatically. | |
7e2ed097 | 1271 | * |
718c602d AH |
1272 | * Similarly, if @auxtrace_overwrite is %false the user needs to signal data |
1273 | * consumption using auxtrace_mmap__write_tail(). | |
1274 | * | |
c83fa7f2 | 1275 | * Return: %0 on success, negative error code otherwise. |
f8a95309 | 1276 | */ |
718c602d AH |
1277 | int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, |
1278 | bool overwrite, unsigned int auxtrace_pages, | |
1279 | bool auxtrace_overwrite) | |
f8a95309 | 1280 | { |
aece948f | 1281 | struct perf_evsel *evsel; |
7e2ed097 ACM |
1282 | const struct cpu_map *cpus = evlist->cpus; |
1283 | const struct thread_map *threads = evlist->threads; | |
a8a8f3eb AH |
1284 | struct mmap_params mp = { |
1285 | .prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), | |
1286 | }; | |
50a682ce | 1287 | |
8db6d6b1 WN |
1288 | if (!evlist->mmap) |
1289 | evlist->mmap = perf_evlist__alloc_mmap(evlist); | |
1290 | if (!evlist->mmap) | |
f8a95309 ACM |
1291 | return -ENOMEM; |
1292 | ||
1b85337d | 1293 | if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0) |
f8a95309 ACM |
1294 | return -ENOMEM; |
1295 | ||
1296 | evlist->overwrite = overwrite; | |
994a1f78 | 1297 | evlist->mmap_len = perf_evlist__mmap_size(pages); |
2af68ef5 | 1298 | pr_debug("mmap size %zuB\n", evlist->mmap_len); |
a8a8f3eb | 1299 | mp.mask = evlist->mmap_len - page_size - 1; |
f8a95309 | 1300 | |
718c602d AH |
1301 | auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len, |
1302 | auxtrace_pages, auxtrace_overwrite); | |
1303 | ||
e5cadb93 | 1304 | evlist__for_each_entry(evlist, evsel) { |
f8a95309 | 1305 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && |
a91e5431 | 1306 | evsel->sample_id == NULL && |
a14bb7a6 | 1307 | perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0) |
f8a95309 | 1308 | return -ENOMEM; |
f8a95309 ACM |
1309 | } |
1310 | ||
ec1e7e43 | 1311 | if (cpu_map__empty(cpus)) |
a8a8f3eb | 1312 | return perf_evlist__mmap_per_thread(evlist, &mp); |
f8a95309 | 1313 | |
a8a8f3eb | 1314 | return perf_evlist__mmap_per_cpu(evlist, &mp); |
f8a95309 | 1315 | } |
7e2ed097 | 1316 | |
718c602d AH |
1317 | int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, |
1318 | bool overwrite) | |
1319 | { | |
1320 | return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false); | |
1321 | } | |
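/*
 * Illustrative sketch, not part of the original evlist.c: draining the
 * mmaps set up above. perf_evlist__mmap_consume() advances the tail so
 * the kernel can reuse the space, as described in the comment before
 * perf_mmap__mmap().
 */
static void evlist_drain_mmaps_example(struct perf_evlist *evlist)
{
	union perf_event *event;
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			/* ... hand 'event' to the tool here ... */
			perf_evlist__mmap_consume(evlist, i);
		}
	}
}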
1322 | ||
602ad878 | 1323 | int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) |
7e2ed097 | 1324 | { |
74bfd2b2 AH |
1325 | struct cpu_map *cpus; |
1326 | struct thread_map *threads; | |
7e2ed097 | 1327 | |
74bfd2b2 | 1328 | threads = thread_map__new_str(target->pid, target->tid, target->uid); |
7e2ed097 | 1329 | |
74bfd2b2 | 1330 | if (!threads) |
7e2ed097 ACM |
1331 | return -1; |
1332 | ||
9c105fbc | 1333 | if (target__uses_dummy_map(target)) |
74bfd2b2 | 1334 | cpus = cpu_map__dummy_new(); |
879d77d0 | 1335 | else |
74bfd2b2 | 1336 | cpus = cpu_map__new(target->cpu_list); |
7e2ed097 | 1337 | |
74bfd2b2 | 1338 | if (!cpus) |
7e2ed097 ACM |
1339 | goto out_delete_threads; |
1340 | ||
ec9a77a7 AH |
1341 | evlist->has_user_cpus = !!target->cpu_list; |
1342 | ||
74bfd2b2 | 1343 | perf_evlist__set_maps(evlist, cpus, threads); |
d5bc056e AH |
1344 | |
1345 | return 0; | |
7e2ed097 ACM |
1346 | |
1347 | out_delete_threads: | |
74bfd2b2 | 1348 | thread_map__put(threads); |
7e2ed097 ACM |
1349 | return -1; |
1350 | } | |
1351 | ||
d5bc056e AH |
1352 | void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus, |
1353 | struct thread_map *threads) | |
3de5cfb0 | 1354 | { |
934e0f20 AH |
1355 | /* |
1356 | * Allow for the possibility that one or another of the maps isn't being | |
1357 | * changed i.e. don't put it. Note we are assuming the maps that are | |
1358 | * being applied are brand new and evlist is taking ownership of the | |
1359 | * original reference count of 1. If that is not the case it is up to | |
1360 | * the caller to increase the reference count. | |
1361 | */ | |
1362 | if (cpus != evlist->cpus) { | |
3de5cfb0 | 1363 | cpu_map__put(evlist->cpus); |
a55e5663 | 1364 | evlist->cpus = cpu_map__get(cpus); |
934e0f20 | 1365 | } |
3de5cfb0 | 1366 | |
934e0f20 | 1367 | if (threads != evlist->threads) { |
3de5cfb0 | 1368 | thread_map__put(evlist->threads); |
a55e5663 | 1369 | evlist->threads = thread_map__get(threads); |
934e0f20 | 1370 | } |
3de5cfb0 | 1371 | |
ec9a77a7 | 1372 | perf_evlist__propagate_maps(evlist); |
3de5cfb0 JO |
1373 | } |
1374 | ||
22c8a376 ACM |
1375 | void __perf_evlist__set_sample_bit(struct perf_evlist *evlist, |
1376 | enum perf_event_sample_format bit) | |
1377 | { | |
1378 | struct perf_evsel *evsel; | |
1379 | ||
e5cadb93 | 1380 | evlist__for_each_entry(evlist, evsel) |
22c8a376 ACM |
1381 | __perf_evsel__set_sample_bit(evsel, bit); |
1382 | } | |
1383 | ||
1384 | void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist, | |
1385 | enum perf_event_sample_format bit) | |
1386 | { | |
1387 | struct perf_evsel *evsel; | |
1388 | ||
e5cadb93 | 1389 | evlist__for_each_entry(evlist, evsel) |
22c8a376 ACM |
1390 | __perf_evsel__reset_sample_bit(evsel, bit); |
1391 | } | |
1392 | ||
23d4aad4 | 1393 | int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel) |
0a102479 | 1394 | { |
0a102479 | 1395 | struct perf_evsel *evsel; |
745cefc5 ACM |
1396 | int err = 0; |
1397 | const int ncpus = cpu_map__nr(evlist->cpus), | |
b3a319d5 | 1398 | nthreads = thread_map__nr(evlist->threads); |
0a102479 | 1399 | |
e5cadb93 | 1400 | evlist__for_each_entry(evlist, evsel) { |
745cefc5 | 1401 | if (evsel->filter == NULL) |
0a102479 | 1402 | continue; |
745cefc5 | 1403 | |
d988d5ee KL |
1404 | /* |
1405 | * filters only work for tracepoint events, which don't have a cpu limit. | |
1406 | * So evlist and evsel should always be the same. | |
1407 | */ | |
f47805a2 | 1408 | err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter); |
23d4aad4 ACM |
1409 | if (err) { |
1410 | *err_evsel = evsel; | |
745cefc5 | 1411 | break; |
23d4aad4 | 1412 | } |
0a102479 FW |
1413 | } |
1414 | ||
745cefc5 ACM |
1415 | return err; |
1416 | } | |
1417 | ||
1418 | int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter) | |
1419 | { | |
1420 | struct perf_evsel *evsel; | |
1421 | int err = 0; | |
745cefc5 | 1422 | |
e5cadb93 | 1423 | evlist__for_each_entry(evlist, evsel) { |
fdf14720 WN |
1424 | if (evsel->attr.type != PERF_TYPE_TRACEPOINT) |
1425 | continue; | |
1426 | ||
94ad89bc | 1427 | err = perf_evsel__set_filter(evsel, filter); |
745cefc5 ACM |
1428 | if (err) |
1429 | break; | |
1430 | } | |
1431 | ||
1432 | return err; | |
0a102479 | 1433 | } |
74429964 | 1434 | |
be199ada | 1435 | int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids) |
cfd70a26 ACM |
1436 | { |
1437 | char *filter; | |
be199ada ACM |
1438 | int ret = -1; |
1439 | size_t i; | |
cfd70a26 | 1440 | |
be199ada ACM |
1441 | for (i = 0; i < npids; ++i) { |
1442 | if (i == 0) { | |
1443 | if (asprintf(&filter, "common_pid != %d", pids[i]) < 0) | |
1444 | return -1; | |
1445 | } else { | |
1446 | char *tmp; | |
1447 | ||
1448 | if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0) | |
1449 | goto out_free; | |
1450 | ||
1451 | free(filter); | |
1452 | filter = tmp; | |
1453 | } | |
1454 | } | |
cfd70a26 ACM |
1455 | |
1456 | ret = perf_evlist__set_filter(evlist, filter); | |
be199ada | 1457 | out_free: |
cfd70a26 ACM |
1458 | free(filter); |
1459 | return ret; | |
1460 | } | |
1461 | ||
be199ada ACM |
1462 | int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid) |
1463 | { | |
1464 | return perf_evlist__set_filter_pids(evlist, 1, &pid); | |
1465 | } | |
1466 | ||
0c21f736 | 1467 | bool perf_evlist__valid_sample_type(struct perf_evlist *evlist) |
74429964 | 1468 | { |
75562573 | 1469 | struct perf_evsel *pos; |
c2a70653 | 1470 | |
75562573 AH |
1471 | if (evlist->nr_entries == 1) |
1472 | return true; | |
1473 | ||
1474 | if (evlist->id_pos < 0 || evlist->is_pos < 0) | |
1475 | return false; | |
1476 | ||
e5cadb93 | 1477 | evlist__for_each_entry(evlist, pos) { |
75562573 AH |
1478 | if (pos->id_pos != evlist->id_pos || |
1479 | pos->is_pos != evlist->is_pos) | |
c2a70653 | 1480 | return false; |
74429964 FW |
1481 | } |
1482 | ||
c2a70653 | 1483 | return true; |
74429964 FW |
1484 | } |
1485 | ||
75562573 | 1486 | u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist) |
c2a70653 | 1487 | { |
75562573 AH |
1488 | struct perf_evsel *evsel; |
1489 | ||
1490 | if (evlist->combined_sample_type) | |
1491 | return evlist->combined_sample_type; | |
1492 | ||
e5cadb93 | 1493 | evlist__for_each_entry(evlist, evsel) |
75562573 AH |
1494 | evlist->combined_sample_type |= evsel->attr.sample_type; |
1495 | ||
1496 | return evlist->combined_sample_type; | |
1497 | } | |
1498 | ||
1499 | u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist) | |
1500 | { | |
1501 | evlist->combined_sample_type = 0; | |
1502 | return __perf_evlist__combined_sample_type(evlist); | |
c2a70653 ACM |
1503 | } |
1504 | ||
98df858e AK |
1505 | u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist) |
1506 | { | |
1507 | struct perf_evsel *evsel; | |
1508 | u64 branch_type = 0; | |
1509 | ||
e5cadb93 | 1510 | evlist__for_each_entry(evlist, evsel) |
98df858e AK |
1511 | branch_type |= evsel->attr.branch_sample_type; |
1512 | return branch_type; | |
1513 | } | |
1514 | ||
9ede473c JO |
1515 | bool perf_evlist__valid_read_format(struct perf_evlist *evlist) |
1516 | { | |
1517 | struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; | |
1518 | u64 read_format = first->attr.read_format; | |
1519 | u64 sample_type = first->attr.sample_type; | |
1520 | ||
e5cadb93 | 1521 | evlist__for_each_entry(evlist, pos) { |
9ede473c JO |
1522 | if (read_format != pos->attr.read_format) |
1523 | return false; | |
1524 | } | |
1525 | ||
1526 | /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */ | |
1527 | if ((sample_type & PERF_SAMPLE_READ) && | |
1528 | !(read_format & PERF_FORMAT_ID)) { | |
1529 | return false; | |
1530 | } | |
1531 | ||
1532 | return true; | |
1533 | } | |
1534 | ||
1535 | u64 perf_evlist__read_format(struct perf_evlist *evlist) | |
1536 | { | |
1537 | struct perf_evsel *first = perf_evlist__first(evlist); | |
1538 | return first->attr.read_format; | |
1539 | } | |
1540 | ||
0c21f736 | 1541 | u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist) |
81e36bff | 1542 | { |
0c21f736 | 1543 | struct perf_evsel *first = perf_evlist__first(evlist); |
81e36bff ACM |
1544 | struct perf_sample *data; |
1545 | u64 sample_type; | |
1546 | u16 size = 0; | |
1547 | ||
81e36bff ACM |
1548 | if (!first->attr.sample_id_all) |
1549 | goto out; | |
1550 | ||
1551 | sample_type = first->attr.sample_type; | |
1552 | ||
1553 | if (sample_type & PERF_SAMPLE_TID) | |
1554 | size += sizeof(data->tid) * 2; | |
1555 | ||
1556 | if (sample_type & PERF_SAMPLE_TIME) | |
1557 | size += sizeof(data->time); | |
1558 | ||
1559 | if (sample_type & PERF_SAMPLE_ID) | |
1560 | size += sizeof(data->id); | |
1561 | ||
1562 | if (sample_type & PERF_SAMPLE_STREAM_ID) | |
1563 | size += sizeof(data->stream_id); | |
1564 | ||
1565 | if (sample_type & PERF_SAMPLE_CPU) | |
1566 | size += sizeof(data->cpu) * 2; | |
75562573 AH |
1567 | |
1568 | if (sample_type & PERF_SAMPLE_IDENTIFIER) | |
1569 | size += sizeof(data->id); | |
81e36bff ACM |
1570 | out: |
1571 | return size; | |
1572 | } | |
1573 | ||
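/*
 * Worked example for the computation above: with sample_id_all set and
 * sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID,
 * every non-sample event grows by
 *
 *	2 * sizeof(u32) + sizeof(u64) + sizeof(u64) = 8 + 8 + 8 = 24 bytes
 *
 * (pid/tid, time, id), which is the trailer size perf_evlist__id_hdr_size()
 * reports.
 */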
0c21f736 | 1574 | bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist) |
74429964 | 1575 | { |
0c21f736 | 1576 | struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; |
c2a70653 | 1577 | |
e5cadb93 | 1578 | evlist__for_each_entry_continue(evlist, pos) { |
c2a70653 ACM |
1579 | if (first->attr.sample_id_all != pos->attr.sample_id_all) |
1580 | return false; | |
74429964 FW |
1581 | } |
1582 | ||
c2a70653 ACM |
1583 | return true; |
1584 | } | |
1585 | ||
0c21f736 | 1586 | bool perf_evlist__sample_id_all(struct perf_evlist *evlist) |
c2a70653 | 1587 | { |
0c21f736 | 1588 | struct perf_evsel *first = perf_evlist__first(evlist); |
c2a70653 | 1589 | return first->attr.sample_id_all; |
74429964 | 1590 | } |
81cce8de ACM |
1591 | |
1592 | void perf_evlist__set_selected(struct perf_evlist *evlist, | |
1593 | struct perf_evsel *evsel) | |
1594 | { | |
1595 | evlist->selected = evsel; | |
1596 | } | |
727ab04e | 1597 | |
a74b4b66 NK |
1598 | void perf_evlist__close(struct perf_evlist *evlist) |
1599 | { | |
1600 | struct perf_evsel *evsel; | |
1601 | int ncpus = cpu_map__nr(evlist->cpus); | |
1602 | int nthreads = thread_map__nr(evlist->threads); | |
8ad9219e | 1603 | int n; |
a74b4b66 | 1604 | |
e5cadb93 | 1605 | evlist__for_each_entry_reverse(evlist, evsel) { |
8ad9219e SE |
1606 | n = evsel->cpus ? evsel->cpus->nr : ncpus; |
1607 | perf_evsel__close(evsel, n, nthreads); | |
1608 | } | |
a74b4b66 NK |
1609 | } |
1610 | ||
4112eb18 ACM |
1611 | static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist) |
1612 | { | |
8c0498b6 AH |
1613 | struct cpu_map *cpus; |
1614 | struct thread_map *threads; | |
4112eb18 ACM |
1615 | int err = -ENOMEM; |
1616 | ||
1617 | /* | |
1618 | * Try reading /sys/devices/system/cpu/online to get | |
1619 | * an all cpus map. | |
1620 | * | |
1621 | * FIXME: -ENOMEM is the best we can do here, the cpu_map | |
1622 | * code needs an overhaul to properly forward the | |
1623 | * error, and we may not want to do that fallback to a | |
1624 | * default cpu identity map :-\ | |
1625 | */ | |
8c0498b6 AH |
1626 | cpus = cpu_map__new(NULL); |
1627 | if (!cpus) | |
4112eb18 ACM |
1628 | goto out; |
1629 | ||
8c0498b6 AH |
1630 | threads = thread_map__new_dummy(); |
1631 | if (!threads) | |
1632 | goto out_put; | |
4112eb18 | 1633 | |
8c0498b6 | 1634 | perf_evlist__set_maps(evlist, cpus, threads); |
 | | err = 0; /* success: don't return the -ENOMEM default */ |
4112eb18 ACM |
1635 | out: |
1636 | return err; | |
8c0498b6 AH |
1637 | out_put: |
1638 | cpu_map__put(cpus); | |
4112eb18 ACM |
1639 | goto out; |
1640 | } | |
1641 | ||
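/*
 * Illustrative sketch: the explicit equivalent a tool can do itself before
 * calling perf_evlist__open(), instead of relying on the fallback above.
 * The helper name is hypothetical and error handling is abbreviated (the
 * real fallback also puts the cpu map when the thread map allocation fails).
 */
static int evlist__setup_syswide(struct perf_evlist *evlist)
{
	struct cpu_map *cpus = cpu_map__new(NULL);		/* all online CPUs */
	struct thread_map *threads = thread_map__new_dummy();	/* single tid == -1 entry */

	if (!cpus || !threads)
		return -ENOMEM;

	perf_evlist__set_maps(evlist, cpus, threads);
	return 0;
}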
6a4bb04c | 1642 | int perf_evlist__open(struct perf_evlist *evlist) |
727ab04e | 1643 | { |
6a4bb04c | 1644 | struct perf_evsel *evsel; |
a74b4b66 | 1645 | int err; |
727ab04e | 1646 | |
4112eb18 ACM |
1647 | /* |
1648 | * Default: one fd per CPU, all threads, aka systemwide | |
1649 | * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL | |
1650 | */ | |
1651 | if (evlist->threads == NULL && evlist->cpus == NULL) { | |
1652 | err = perf_evlist__create_syswide_maps(evlist); | |
1653 | if (err < 0) | |
1654 | goto out_err; | |
1655 | } | |
1656 | ||
733cd2fe AH |
1657 | perf_evlist__update_id_pos(evlist); |
1658 | ||
e5cadb93 | 1659 | evlist__for_each_entry(evlist, evsel) { |
23df7f79 | 1660 | err = perf_evsel__open(evsel, evsel->cpus, evsel->threads); |
727ab04e ACM |
1661 | if (err < 0) |
1662 | goto out_err; | |
1663 | } | |
1664 | ||
1665 | return 0; | |
1666 | out_err: | |
a74b4b66 | 1667 | perf_evlist__close(evlist); |
41c21a68 | 1668 | errno = -err; |
727ab04e ACM |
1669 | return err; |
1670 | } | |
35b9d88e | 1671 | |
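/*
 * Illustrative usage fragment, assuming an evlist already populated with
 * events (e.g. via perf_evlist__new_default()): on failure the function
 * above stores the positive errno value in errno, which
 * perf_evlist__strerror_open() (defined below) turns into hint text.
 */
	if (perf_evlist__open(evlist) < 0) {
		char errbuf[BUFSIZ];

		perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
		pr_err("%s\n", errbuf);
		return -1;
	}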
602ad878 | 1672 | int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target, |
55e162ea | 1673 | const char *argv[], bool pipe_output, |
735f7e0b | 1674 | void (*exec_error)(int signo, siginfo_t *info, void *ucontext)) |
35b9d88e ACM |
1675 | { |
1676 | int child_ready_pipe[2], go_pipe[2]; | |
1677 | char bf; | |
1678 | ||
1679 | if (pipe(child_ready_pipe) < 0) { | |
1680 | perror("failed to create 'ready' pipe"); | |
1681 | return -1; | |
1682 | } | |
1683 | ||
1684 | if (pipe(go_pipe) < 0) { | |
1685 | perror("failed to create 'go' pipe"); | |
1686 | goto out_close_ready_pipe; | |
1687 | } | |
1688 | ||
1689 | evlist->workload.pid = fork(); | |
1690 | if (evlist->workload.pid < 0) { | |
1691 | perror("failed to fork"); | |
1692 | goto out_close_pipes; | |
1693 | } | |
1694 | ||
1695 | if (!evlist->workload.pid) { | |
5f1c4225 ACM |
1696 | int ret; |
1697 | ||
119fa3c9 | 1698 | if (pipe_output) |
35b9d88e ACM |
1699 | dup2(2, 1); |
1700 | ||
0817df08 DA |
1701 | signal(SIGTERM, SIG_DFL); |
1702 | ||
35b9d88e ACM |
1703 | close(child_ready_pipe[0]); |
1704 | close(go_pipe[1]); | |
1705 | fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); | |
1706 | ||
35b9d88e ACM |
1707 | /* |
1708 | * Tell the parent we're ready to go | |
1709 | */ | |
1710 | close(child_ready_pipe[1]); | |
1711 | ||
1712 | /* | |
1713 | * Wait until the parent tells us to go. | |
1714 | */ | |
5f1c4225 ACM |
1715 | ret = read(go_pipe[0], &bf, 1); |
1716 | /* | |
1717 | * The parent will ask for the execvp() to be performed by | |
1718 | * writing exactly one byte, in workload.cork_fd, usually via | |
1719 | * perf_evlist__start_workload(). | |
1720 | * | |
20f86fc1 | 1721 | * For cancelling the workload without actually running it, |
5f1c4225 ACM |
1722 | * the parent will just close workload.cork_fd, without writing |
1723 | * anything, i.e. read will return zero and we just exit() | |
1724 | * here. | |
1725 | */ | |
1726 | if (ret != 1) { | |
1727 | if (ret == -1) | |
1728 | perror("unable to read pipe"); | |
1729 | exit(ret); | |
1730 | } | |
35b9d88e ACM |
1731 | |
1732 | execvp(argv[0], (char **)argv); | |
1733 | ||
735f7e0b | 1734 | if (exec_error) { |
f33cbe72 ACM |
1735 | union sigval val; |
1736 | ||
1737 | val.sival_int = errno; | |
1738 | if (sigqueue(getppid(), SIGUSR1, val)) | |
1739 | perror(argv[0]); | |
1740 | } else | |
1741 | perror(argv[0]); | |
35b9d88e ACM |
1742 | exit(-1); |
1743 | } | |
1744 | ||
735f7e0b ACM |
1745 | if (exec_error) { |
1746 | struct sigaction act = { | |
1747 | .sa_flags = SA_SIGINFO, | |
1748 | .sa_sigaction = exec_error, | |
1749 | }; | |
1750 | sigaction(SIGUSR1, &act, NULL); | |
1751 | } | |
1752 | ||
1aaf63b1 ACM |
1753 | if (target__none(target)) { |
1754 | if (evlist->threads == NULL) { | |
1755 | fprintf(stderr, "FATAL: evlist->threads needs to be set at this point (%s:%d).\n", |
1756 | __func__, __LINE__); | |
1757 | goto out_close_pipes; | |
1758 | } | |
e13798c7 | 1759 | thread_map__set_pid(evlist->threads, 0, evlist->workload.pid); |
1aaf63b1 | 1760 | } |
35b9d88e ACM |
1761 | |
1762 | close(child_ready_pipe[1]); | |
1763 | close(go_pipe[0]); | |
1764 | /* | |
1765 | * wait for child to settle | |
1766 | */ | |
1767 | if (read(child_ready_pipe[0], &bf, 1) == -1) { | |
1768 | perror("unable to read pipe"); | |
1769 | goto out_close_pipes; | |
1770 | } | |
1771 | ||
bcf3145f | 1772 | fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC); |
35b9d88e ACM |
1773 | evlist->workload.cork_fd = go_pipe[1]; |
1774 | close(child_ready_pipe[0]); | |
1775 | return 0; | |
1776 | ||
1777 | out_close_pipes: | |
1778 | close(go_pipe[0]); | |
1779 | close(go_pipe[1]); | |
1780 | out_close_ready_pipe: | |
1781 | close(child_ready_pipe[0]); | |
1782 | close(child_ready_pipe[1]); | |
1783 | return -1; | |
1784 | } | |
1785 | ||
1786 | int perf_evlist__start_workload(struct perf_evlist *evlist) | |
1787 | { | |
1788 | if (evlist->workload.cork_fd > 0) { | |
b3824404 | 1789 | char bf = 0; |
bcf3145f | 1790 | int ret; |
35b9d88e ACM |
1791 | /* |
1792 | * Remove the cork, let it rip! | |
1793 | */ | |
bcf3145f NK |
1794 | ret = write(evlist->workload.cork_fd, &bf, 1); |
1795 | if (ret < 0) | |
1796 | perror("unable to write to pipe"); |
1797 | ||
1798 | close(evlist->workload.cork_fd); | |
1799 | return ret; | |
35b9d88e ACM |
1800 | } |
1801 | ||
1802 | return 0; | |
1803 | } | |
cb0b29e0 | 1804 | |
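/*
 * Illustrative usage fragment for the two functions above: fork the
 * workload stopped, open the events on it, then uncork it. "rec_argv" and
 * the SIGUSR1 handler "workload_exec_failed_signal" are hypothetical
 * names, and the map/mmap setup normally done between these steps is
 * omitted.
 */
	if (perf_evlist__prepare_workload(evlist, &target, rec_argv, false,
					  workload_exec_failed_signal) < 0) {
		pr_err("Couldn't run the workload!\n");
		return -1;
	}

	if (perf_evlist__open(evlist) < 0)
		return -1;

	perf_evlist__start_workload(evlist);	/* writes the single "go" byte */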
a3f698fe | 1805 | int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, |
0807d2d8 | 1806 | struct perf_sample *sample) |
cb0b29e0 | 1807 | { |
75562573 AH |
1808 | struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event); |
1809 | ||
1810 | if (!evsel) | |
1811 | return -EFAULT; | |
0807d2d8 | 1812 | return perf_evsel__parse_sample(evsel, event, sample); |
cb0b29e0 | 1813 | } |
78f067b3 ACM |
1814 | |
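/*
 * Illustrative fragment: decoding one record pulled from the ring buffer.
 * "event" is assumed to point at a complete record; error handling is
 * abbreviated.
 */
	struct perf_sample sample;

	if (event->header.type == PERF_RECORD_SAMPLE &&
	    perf_evlist__parse_sample(evlist, event, &sample) == 0)
		printf("pid %u tid %u ip %#" PRIx64 "\n",
		       sample.pid, sample.tid, sample.ip);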
1815 | size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp) | |
1816 | { | |
1817 | struct perf_evsel *evsel; | |
1818 | size_t printed = 0; | |
1819 | ||
e5cadb93 | 1820 | evlist__for_each_entry(evlist, evsel) { |
78f067b3 ACM |
1821 | printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "", |
1822 | perf_evsel__name(evsel)); | |
1823 | } | |
1824 | ||
b2222139 | 1825 | return printed + fprintf(fp, "\n"); |
78f067b3 | 1826 | } |
6ef068cb | 1827 | |
d9aade7f | 1828 | int perf_evlist__strerror_open(struct perf_evlist *evlist, |
a8f23d8f ACM |
1829 | int err, char *buf, size_t size) |
1830 | { | |
1831 | int printed, value; | |
c8b5f2c9 | 1832 | char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf)); |
a8f23d8f ACM |
1833 | |
1834 | switch (err) { | |
1835 | case EACCES: | |
1836 | case EPERM: | |
1837 | printed = scnprintf(buf, size, | |
1838 | "Error:\t%s.\n" | |
1839 | "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg); | |
1840 | ||
1a47245d | 1841 | value = perf_event_paranoid(); |
a8f23d8f ACM |
1842 | |
1843 | printed += scnprintf(buf + printed, size - printed, "\nHint:\t"); | |
1844 | ||
1845 | if (value >= 2) { | |
1846 | printed += scnprintf(buf + printed, size - printed, | |
1847 | "For your workloads it needs to be <= 1\nHint:\t"); | |
1848 | } | |
1849 | printed += scnprintf(buf + printed, size - printed, | |
5229e366 | 1850 | "For system wide tracing it needs to be set to -1.\n"); |
a8f23d8f ACM |
1851 | |
1852 | printed += scnprintf(buf + printed, size - printed, | |
5229e366 ACM |
1853 | "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n" |
1854 | "Hint:\tThe current value is %d.", value); | |
a8f23d8f | 1855 | break; |
d9aade7f ACM |
1856 | case EINVAL: { |
1857 | struct perf_evsel *first = perf_evlist__first(evlist); | |
1858 | int max_freq; | |
1859 | ||
1860 | if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0) | |
1861 | goto out_default; | |
1862 | ||
1863 | if (first->attr.sample_freq < (u64)max_freq) | |
1864 | goto out_default; | |
1865 | ||
1866 | printed = scnprintf(buf, size, | |
1867 | "Error:\t%s.\n" | |
1868 | "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n" | |
1869 | "Hint:\tThe current value is %d and %" PRIu64 " is being requested.", | |
1870 | emsg, max_freq, first->attr.sample_freq); | |
1871 | break; | |
1872 | } | |
a8f23d8f | 1873 | default: |
d9aade7f | 1874 | out_default: |
a8f23d8f ACM |
1875 | scnprintf(buf, size, "%s", emsg); |
1876 | break; | |
1877 | } | |
1878 | ||
1879 | return 0; | |
1880 | } | |
a025e4f0 | 1881 | |
956fa571 ACM |
1882 | int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size) |
1883 | { | |
c8b5f2c9 | 1884 | char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf)); |
e965bea1 | 1885 | int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0; |
956fa571 ACM |
1886 | |
1887 | switch (err) { | |
1888 | case EPERM: | |
e5d4a290 | 1889 | sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user); |
e965bea1 ACM |
1890 | printed += scnprintf(buf + printed, size - printed, |
1891 | "Error:\t%s.\n" | |
956fa571 | 1892 | "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n" |
e965bea1 | 1893 | "Hint:\tTried using %d kB.\n", |
e5d4a290 | 1894 | emsg, pages_max_per_user, pages_attempted); |
e965bea1 ACM |
1895 | |
1896 | if (pages_attempted >= pages_max_per_user) { | |
1897 | printed += scnprintf(buf + printed, size - printed, | |
1898 | "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n", | |
1899 | pages_max_per_user + pages_attempted); | |
1900 | } | |
1901 | ||
1902 | printed += scnprintf(buf + printed, size - printed, | |
1903 | "Hint:\tTry using a smaller -m/--mmap-pages value."); | |
956fa571 ACM |
1904 | break; |
1905 | default: | |
1906 | scnprintf(buf, size, "%s", emsg); | |
1907 | break; | |
1908 | } | |
1909 | ||
1910 | return 0; | |
1911 | } | |
1912 | ||
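/*
 * Illustrative fragment: reporting a failed ring-buffer setup with the
 * hint text built above. "err" is assumed to hold the negative errno
 * returned by the mmap step.
 */
	if (err < 0) {
		char errbuf[BUFSIZ];

		perf_evlist__strerror_mmap(evlist, -err, errbuf, sizeof(errbuf));
		pr_err("%s\n", errbuf);
	}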
a025e4f0 AH |
1913 | void perf_evlist__to_front(struct perf_evlist *evlist, |
1914 | struct perf_evsel *move_evsel) | |
1915 | { | |
1916 | struct perf_evsel *evsel, *n; | |
1917 | LIST_HEAD(move); | |
1918 | ||
1919 | if (move_evsel == perf_evlist__first(evlist)) | |
1920 | return; | |
1921 | ||
e5cadb93 | 1922 | evlist__for_each_entry_safe(evlist, n, evsel) { |
a025e4f0 AH |
1923 | if (evsel->leader == move_evsel->leader) |
1924 | list_move_tail(&evsel->node, &move); | |
1925 | } | |
1926 | ||
1927 | list_splice(&move, &evlist->entries); | |
1928 | } | |
60b0896c AH |
1929 | |
1930 | void perf_evlist__set_tracking_event(struct perf_evlist *evlist, | |
1931 | struct perf_evsel *tracking_evsel) | |
1932 | { | |
1933 | struct perf_evsel *evsel; | |
1934 | ||
1935 | if (tracking_evsel->tracking) | |
1936 | return; | |
1937 | ||
e5cadb93 | 1938 | evlist__for_each_entry(evlist, evsel) { |
60b0896c AH |
1939 | if (evsel != tracking_evsel) |
1940 | evsel->tracking = false; | |
1941 | } | |
1942 | ||
1943 | tracking_evsel->tracking = true; | |
1944 | } | |
7630b3e2 WN |
1945 | |
1946 | struct perf_evsel * | |
1947 | perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, | |
1948 | const char *str) | |
1949 | { | |
1950 | struct perf_evsel *evsel; | |
1951 | ||
e5cadb93 | 1952 | evlist__for_each_entry(evlist, evsel) { |
7630b3e2 WN |
1953 | if (!evsel->name) |
1954 | continue; | |
1955 | if (strcmp(str, evsel->name) == 0) | |
1956 | return evsel; | |
1957 | } | |
1958 | ||
1959 | return NULL; | |
1960 | } | |
54cc54de WN |
1961 | |
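/*
 * Illustrative fragment: looking an event up by the name the user typed
 * on the command line. The event string is just an example.
 */
	struct perf_evsel *evsel =
		perf_evlist__find_evsel_by_str(evlist, "sched:sched_switch");

	if (evsel)
		pr_debug("found %s\n", perf_evsel__name(evsel));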
1962 | void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, | |
1963 | enum bkw_mmap_state state) | |
1964 | { | |
1965 | enum bkw_mmap_state old_state = evlist->bkw_mmap_state; | |
1966 | enum action { | |
1967 | NONE, | |
1968 | PAUSE, | |
1969 | RESUME, | |
1970 | } action = NONE; | |
1971 | ||
1972 | if (!evlist->backward_mmap) | |
1973 | return; | |
1974 | ||
1975 | switch (old_state) { | |
1976 | case BKW_MMAP_NOTREADY: { | |
1977 | if (state != BKW_MMAP_RUNNING) | |
1978 | goto state_err; |
1979 | break; | |
1980 | } | |
1981 | case BKW_MMAP_RUNNING: { | |
1982 | if (state != BKW_MMAP_DATA_PENDING) | |
1983 | goto state_err; | |
1984 | action = PAUSE; | |
1985 | break; | |
1986 | } | |
1987 | case BKW_MMAP_DATA_PENDING: { | |
1988 | if (state != BKW_MMAP_EMPTY) | |
1989 | goto state_err; | |
1990 | break; | |
1991 | } | |
1992 | case BKW_MMAP_EMPTY: { | |
1993 | if (state != BKW_MMAP_RUNNING) | |
1994 | goto state_err; | |
1995 | action = RESUME; | |
1996 | break; | |
1997 | } | |
1998 | default: | |
1999 | WARN_ONCE(1, "Shouldn't get here\n"); |
2000 | } | |
2001 | ||
2002 | evlist->bkw_mmap_state = state; | |
2003 | ||
2004 | switch (action) { | |
2005 | case PAUSE: | |
2006 | perf_evlist__pause(evlist); | |
2007 | break; | |
2008 | case RESUME: | |
2009 | perf_evlist__resume(evlist); | |
2010 | break; | |
2011 | case NONE: | |
2012 | default: | |
2013 | break; | |
2014 | } | |
2015 | ||
2016 | state_err: | |
2017 | return; | |
2018 | } |
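/*
 * Illustrative sketch of the legal transition sequence enforced above for
 * the backward (overwritable) ring buffers:
 *
 *	NOTREADY     -> RUNNING        start of a measurement pass
 *	RUNNING      -> DATA_PENDING   pause the writers before reading
 *	DATA_PENDING -> EMPTY          after the buffers have been drained
 *	EMPTY        -> RUNNING        resume the writers for the next pass
 */
	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
	/* ... read and process the overwritable buffers ... */
	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);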