Commit | Line | Data |
---|---|---|
f8a95309 ACM |
1 | /* |
2 | * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> | |
3 | * | |
4 | * Parts came from builtin-{top,stat,record}.c, see those files for further | |
5 | * copyright notes. | |
6 | * | |
7 | * Released under the GPL v2. (and only v2, not any later version) | |
8 | */ | |
a8c9ae18 | 9 | #include "util.h" |
956fa571 | 10 | #include <api/fs/fs.h> |
5c581041 | 11 | #include <poll.h> |
f8a95309 ACM |
12 | #include "cpumap.h" |
13 | #include "thread_map.h" | |
12864b31 | 14 | #include "target.h" |
361c99a6 ACM |
15 | #include "evlist.h" |
16 | #include "evsel.h" | |
e3e1a54f | 17 | #include "debug.h" |
35b9d88e | 18 | #include <unistd.h> |
361c99a6 | 19 | |
50d08e47 | 20 | #include "parse-events.h" |
994a1f78 | 21 | #include "parse-options.h" |
50d08e47 | 22 | |
f8a95309 ACM |
23 | #include <sys/mman.h> |
24 | ||
70db7533 ACM |
25 | #include <linux/bitops.h> |
26 | #include <linux/hash.h> | |
0389cd1f | 27 | #include <linux/log2.h> |
70db7533 | 28 | |
e4b356b5 ACM |
29 | static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx); |
30 | static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx); | |
31 | ||
f8a95309 | 32 | #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) |
a91e5431 | 33 | #define SID(e, x, y) xyarray__entry(e->sample_id, x, y) |
f8a95309 | 34 | |
7e2ed097 ACM |
35 | void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, |
36 | struct thread_map *threads) | |
ef1d1af2 ACM |
37 | { |
38 | int i; | |
39 | ||
40 | for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i) | |
41 | INIT_HLIST_HEAD(&evlist->heads[i]); | |
42 | INIT_LIST_HEAD(&evlist->entries); | |
7e2ed097 | 43 | perf_evlist__set_maps(evlist, cpus, threads); |
1b85337d | 44 | fdarray__init(&evlist->pollfd, 64); |
35b9d88e | 45 | evlist->workload.pid = -1; |
ef1d1af2 ACM |
46 | } |
47 | ||
334fe7a3 | 48 | struct perf_evlist *perf_evlist__new(void) |
361c99a6 ACM |
49 | { |
50 | struct perf_evlist *evlist = zalloc(sizeof(*evlist)); | |
51 | ||
ef1d1af2 | 52 | if (evlist != NULL) |
334fe7a3 | 53 | perf_evlist__init(evlist, NULL, NULL); |
361c99a6 ACM |
54 | |
55 | return evlist; | |
56 | } | |
57 | ||
b22d54b0 JO |
58 | struct perf_evlist *perf_evlist__new_default(void) |
59 | { | |
60 | struct perf_evlist *evlist = perf_evlist__new(); | |
61 | ||
62 | if (evlist && perf_evlist__add_default(evlist)) { | |
63 | perf_evlist__delete(evlist); | |
64 | evlist = NULL; | |
65 | } | |
66 | ||
67 | return evlist; | |
68 | } | |
69 | ||
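/*
 * A minimal usage sketch for the constructor above, assuming a caller that
 * only needs the default cycles event; the function name is illustrative
 * and error handling beyond the NULL check is elided.
 */
static int example_default_lifecycle(void)
{
	struct perf_evlist *evlist = perf_evlist__new_default();

	if (evlist == NULL)
		return -ENOMEM;

	/* ... perf_evlist__open(), perf_evlist__mmap(), read events ... */

	perf_evlist__delete(evlist);	/* closes, munmaps and frees the evsels */
	return 0;
}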
75562573 AH |
70 | /** |
71 | * perf_evlist__set_id_pos - set the positions of event ids. | |
72 | * @evlist: selected event list | |
73 | * | |
74 | * Events with compatible sample types all have the same id_pos | |
75 | * and is_pos. For convenience, put a copy on evlist. | |
76 | */ | |
77 | void perf_evlist__set_id_pos(struct perf_evlist *evlist) | |
78 | { | |
79 | struct perf_evsel *first = perf_evlist__first(evlist); | |
80 | ||
81 | evlist->id_pos = first->id_pos; | |
82 | evlist->is_pos = first->is_pos; | |
83 | } | |
84 | ||
733cd2fe AH |
85 | static void perf_evlist__update_id_pos(struct perf_evlist *evlist) |
86 | { | |
87 | struct perf_evsel *evsel; | |
88 | ||
0050f7aa | 89 | evlist__for_each(evlist, evsel) |
733cd2fe AH |
90 | perf_evsel__calc_id_pos(evsel); |
91 | ||
92 | perf_evlist__set_id_pos(evlist); | |
93 | } | |
94 | ||
361c99a6 ACM |
95 | static void perf_evlist__purge(struct perf_evlist *evlist) |
96 | { | |
97 | struct perf_evsel *pos, *n; | |
98 | ||
0050f7aa | 99 | evlist__for_each_safe(evlist, n, pos) { |
361c99a6 ACM |
100 | list_del_init(&pos->node); |
101 | perf_evsel__delete(pos); | |
102 | } | |
103 | ||
104 | evlist->nr_entries = 0; | |
105 | } | |
106 | ||
ef1d1af2 | 107 | void perf_evlist__exit(struct perf_evlist *evlist) |
361c99a6 | 108 | { |
04662523 | 109 | zfree(&evlist->mmap); |
1b85337d | 110 | fdarray__exit(&evlist->pollfd); |
ef1d1af2 ACM |
111 | } |
112 | ||
113 | void perf_evlist__delete(struct perf_evlist *evlist) | |
114 | { | |
983874d1 | 115 | perf_evlist__munmap(evlist); |
f26e1c7c | 116 | perf_evlist__close(evlist); |
03ad9747 ACM |
117 | cpu_map__delete(evlist->cpus); |
118 | thread_map__delete(evlist->threads); | |
119 | evlist->cpus = NULL; | |
120 | evlist->threads = NULL; | |
ef1d1af2 ACM |
121 | perf_evlist__purge(evlist); |
122 | perf_evlist__exit(evlist); | |
361c99a6 ACM |
123 | free(evlist); |
124 | } | |
125 | ||
126 | void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) | |
127 | { | |
128 | list_add_tail(&entry->node, &evlist->entries); | |
ef503831 | 129 | entry->idx = evlist->nr_entries; |
60b0896c | 130 | entry->tracking = !entry->idx; |
ef503831 | 131 | |
75562573 AH |
132 | if (!evlist->nr_entries++) |
133 | perf_evlist__set_id_pos(evlist); | |
361c99a6 ACM |
134 | } |
135 | ||
0529bc1f JO |
136 | void perf_evlist__splice_list_tail(struct perf_evlist *evlist, |
137 | struct list_head *list, | |
138 | int nr_entries) | |
50d08e47 | 139 | { |
75562573 AH |
140 | bool set_id_pos = !evlist->nr_entries; |
141 | ||
50d08e47 ACM |
142 | list_splice_tail(list, &evlist->entries); |
143 | evlist->nr_entries += nr_entries; | |
75562573 AH |
144 | if (set_id_pos) |
145 | perf_evlist__set_id_pos(evlist); | |
50d08e47 ACM |
146 | } |
147 | ||
63dab225 ACM |
148 | void __perf_evlist__set_leader(struct list_head *list) |
149 | { | |
150 | struct perf_evsel *evsel, *leader; | |
151 | ||
152 | leader = list_entry(list->next, struct perf_evsel, node); | |
97f63e4a NK |
153 | evsel = list_entry(list->prev, struct perf_evsel, node); |
154 | ||
155 | leader->nr_members = evsel->idx - leader->idx + 1; | |
63dab225 | 156 | |
0050f7aa | 157 | __evlist__for_each(list, evsel) { |
74b2133d | 158 | evsel->leader = leader; |
63dab225 ACM |
159 | } |
160 | } | |
161 | ||
162 | void perf_evlist__set_leader(struct perf_evlist *evlist) | |
6a4bb04c | 163 | { |
97f63e4a NK |
164 | if (evlist->nr_entries) { |
165 | evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0; | |
63dab225 | 166 | __perf_evlist__set_leader(&evlist->entries); |
97f63e4a | 167 | } |
6a4bb04c JO |
168 | } |
169 | ||
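/*
 * Worked example, assuming a group parsed in the usual order: for an event
 * spec such as "{cycles,instructions}", the entries sit on the list in
 * parse order, so __perf_evlist__set_leader() makes the first evsel
 * ("cycles") the leader of both, sets leader->nr_members to 2 and points
 * each evsel->leader at it. perf_evlist__set_leader(), used when the whole
 * evlist forms one group, records that single group in evlist->nr_groups.
 */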
361c99a6 ACM |
170 | int perf_evlist__add_default(struct perf_evlist *evlist) |
171 | { | |
172 | struct perf_event_attr attr = { | |
173 | .type = PERF_TYPE_HARDWARE, | |
174 | .config = PERF_COUNT_HW_CPU_CYCLES, | |
175 | }; | |
1aed2671 JR |
176 | struct perf_evsel *evsel; |
177 | ||
178 | event_attr_init(&attr); | |
361c99a6 | 179 | |
ef503831 | 180 | evsel = perf_evsel__new(&attr); |
361c99a6 | 181 | if (evsel == NULL) |
cc2d86b0 SE |
182 | goto error; |
183 | ||
184 | /* use strdup() because free(evsel) assumes name is allocated */ | |
185 | evsel->name = strdup("cycles"); | |
186 | if (!evsel->name) | |
187 | goto error_free; | |
361c99a6 ACM |
188 | |
189 | perf_evlist__add(evlist, evsel); | |
190 | return 0; | |
cc2d86b0 SE |
191 | error_free: |
192 | perf_evsel__delete(evsel); | |
193 | error: | |
194 | return -ENOMEM; | |
361c99a6 | 195 | } |
5c581041 | 196 | |
e60fc847 ACM |
197 | static int perf_evlist__add_attrs(struct perf_evlist *evlist, |
198 | struct perf_event_attr *attrs, size_t nr_attrs) | |
50d08e47 ACM |
199 | { |
200 | struct perf_evsel *evsel, *n; | |
201 | LIST_HEAD(head); | |
202 | size_t i; | |
203 | ||
204 | for (i = 0; i < nr_attrs; i++) { | |
ef503831 | 205 | evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i); |
50d08e47 ACM |
206 | if (evsel == NULL) |
207 | goto out_delete_partial_list; | |
208 | list_add_tail(&evsel->node, &head); | |
209 | } | |
210 | ||
211 | perf_evlist__splice_list_tail(evlist, &head, nr_attrs); | |
212 | ||
213 | return 0; | |
214 | ||
215 | out_delete_partial_list: | |
0050f7aa | 216 | __evlist__for_each_safe(&head, n, evsel) |
50d08e47 ACM |
217 | perf_evsel__delete(evsel); |
218 | return -1; | |
219 | } | |
220 | ||
79695e1b ACM |
221 | int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, |
222 | struct perf_event_attr *attrs, size_t nr_attrs) | |
223 | { | |
224 | size_t i; | |
225 | ||
226 | for (i = 0; i < nr_attrs; i++) | |
227 | event_attr_init(attrs + i); | |
228 | ||
229 | return perf_evlist__add_attrs(evlist, attrs, nr_attrs); | |
230 | } | |
231 | ||
da378962 ACM |
232 | struct perf_evsel * |
233 | perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id) | |
ee29be62 ACM |
234 | { |
235 | struct perf_evsel *evsel; | |
236 | ||
0050f7aa | 237 | evlist__for_each(evlist, evsel) { |
ee29be62 ACM |
238 | if (evsel->attr.type == PERF_TYPE_TRACEPOINT && |
239 | (int)evsel->attr.config == id) | |
240 | return evsel; | |
241 | } | |
242 | ||
243 | return NULL; | |
244 | } | |
245 | ||
a2f2804a DA |
246 | struct perf_evsel * |
247 | perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist, | |
248 | const char *name) | |
249 | { | |
250 | struct perf_evsel *evsel; | |
251 | ||
0050f7aa | 252 | evlist__for_each(evlist, evsel) { |
a2f2804a DA |
253 | if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) && |
254 | (strcmp(evsel->name, name) == 0)) | |
255 | return evsel; | |
256 | } | |
257 | ||
258 | return NULL; | |
259 | } | |
260 | ||
39876e7d ACM |
261 | int perf_evlist__add_newtp(struct perf_evlist *evlist, |
262 | const char *sys, const char *name, void *handler) | |
263 | { | |
ef503831 | 264 | struct perf_evsel *evsel = perf_evsel__newtp(sys, name); |
39876e7d | 265 | |
39876e7d ACM |
266 | if (evsel == NULL) |
267 | return -1; | |
268 | ||
744a9719 | 269 | evsel->handler = handler; |
39876e7d ACM |
270 | perf_evlist__add(evlist, evsel); |
271 | return 0; | |
272 | } | |
273 | ||
bf8e8f4b AH |
274 | static int perf_evlist__nr_threads(struct perf_evlist *evlist, |
275 | struct perf_evsel *evsel) | |
276 | { | |
277 | if (evsel->system_wide) | |
278 | return 1; | |
279 | else | |
280 | return thread_map__nr(evlist->threads); | |
281 | } | |
282 | ||
4152ab37 ACM |
283 | void perf_evlist__disable(struct perf_evlist *evlist) |
284 | { | |
285 | int cpu, thread; | |
286 | struct perf_evsel *pos; | |
b3a319d5 | 287 | int nr_cpus = cpu_map__nr(evlist->cpus); |
bf8e8f4b | 288 | int nr_threads; |
4152ab37 | 289 | |
b3a319d5 | 290 | for (cpu = 0; cpu < nr_cpus; cpu++) { |
0050f7aa | 291 | evlist__for_each(evlist, pos) { |
395c3070 | 292 | if (!perf_evsel__is_group_leader(pos) || !pos->fd) |
3fe4430d | 293 | continue; |
bf8e8f4b | 294 | nr_threads = perf_evlist__nr_threads(evlist, pos); |
b3a319d5 | 295 | for (thread = 0; thread < nr_threads; thread++) |
55da8005 NK |
296 | ioctl(FD(pos, cpu, thread), |
297 | PERF_EVENT_IOC_DISABLE, 0); | |
4152ab37 ACM |
298 | } |
299 | } | |
300 | } | |
301 | ||
764e16a3 DA |
302 | void perf_evlist__enable(struct perf_evlist *evlist) |
303 | { | |
304 | int cpu, thread; | |
305 | struct perf_evsel *pos; | |
b3a319d5 | 306 | int nr_cpus = cpu_map__nr(evlist->cpus); |
bf8e8f4b | 307 | int nr_threads; |
764e16a3 | 308 | |
b3a319d5 | 309 | for (cpu = 0; cpu < nr_cpus; cpu++) { |
0050f7aa | 310 | evlist__for_each(evlist, pos) { |
395c3070 | 311 | if (!perf_evsel__is_group_leader(pos) || !pos->fd) |
3fe4430d | 312 | continue; |
bf8e8f4b | 313 | nr_threads = perf_evlist__nr_threads(evlist, pos); |
b3a319d5 | 314 | for (thread = 0; thread < nr_threads; thread++) |
55da8005 NK |
315 | ioctl(FD(pos, cpu, thread), |
316 | PERF_EVENT_IOC_ENABLE, 0); | |
764e16a3 DA |
317 | } |
318 | } | |
319 | } | |
320 | ||
395c3070 AH |
321 | int perf_evlist__disable_event(struct perf_evlist *evlist, |
322 | struct perf_evsel *evsel) | |
323 | { | |
324 | int cpu, thread, err; | |
bf8e8f4b AH |
325 | int nr_cpus = cpu_map__nr(evlist->cpus); |
326 | int nr_threads = perf_evlist__nr_threads(evlist, evsel); | |
395c3070 AH |
327 | |
328 | if (!evsel->fd) | |
329 | return 0; | |
330 | ||
bf8e8f4b AH |
331 | for (cpu = 0; cpu < nr_cpus; cpu++) { |
332 | for (thread = 0; thread < nr_threads; thread++) { | |
395c3070 AH |
333 | err = ioctl(FD(evsel, cpu, thread), |
334 | PERF_EVENT_IOC_DISABLE, 0); | |
335 | if (err) | |
336 | return err; | |
337 | } | |
338 | } | |
339 | return 0; | |
340 | } | |
341 | ||
342 | int perf_evlist__enable_event(struct perf_evlist *evlist, | |
343 | struct perf_evsel *evsel) | |
344 | { | |
345 | int cpu, thread, err; | |
bf8e8f4b AH |
346 | int nr_cpus = cpu_map__nr(evlist->cpus); |
347 | int nr_threads = perf_evlist__nr_threads(evlist, evsel); | |
395c3070 AH |
348 | |
349 | if (!evsel->fd) | |
350 | return -EINVAL; | |
351 | ||
bf8e8f4b AH |
352 | for (cpu = 0; cpu < nr_cpus; cpu++) { |
353 | for (thread = 0; thread < nr_threads; thread++) { | |
395c3070 AH |
354 | err = ioctl(FD(evsel, cpu, thread), |
355 | PERF_EVENT_IOC_ENABLE, 0); | |
356 | if (err) | |
357 | return err; | |
358 | } | |
359 | } | |
360 | return 0; | |
361 | } | |
362 | ||
1c65056c AH |
363 | static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist, |
364 | struct perf_evsel *evsel, int cpu) | |
365 | { | |
366 | int thread, err; | |
367 | int nr_threads = perf_evlist__nr_threads(evlist, evsel); | |
368 | ||
369 | if (!evsel->fd) | |
370 | return -EINVAL; | |
371 | ||
372 | for (thread = 0; thread < nr_threads; thread++) { | |
373 | err = ioctl(FD(evsel, cpu, thread), | |
374 | PERF_EVENT_IOC_ENABLE, 0); | |
375 | if (err) | |
376 | return err; | |
377 | } | |
378 | return 0; | |
379 | } | |
380 | ||
381 | static int perf_evlist__enable_event_thread(struct perf_evlist *evlist, | |
382 | struct perf_evsel *evsel, | |
383 | int thread) | |
384 | { | |
385 | int cpu, err; | |
386 | int nr_cpus = cpu_map__nr(evlist->cpus); | |
387 | ||
388 | if (!evsel->fd) | |
389 | return -EINVAL; | |
390 | ||
391 | for (cpu = 0; cpu < nr_cpus; cpu++) { | |
392 | err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0); | |
393 | if (err) | |
394 | return err; | |
395 | } | |
396 | return 0; | |
397 | } | |
398 | ||
399 | int perf_evlist__enable_event_idx(struct perf_evlist *evlist, | |
400 | struct perf_evsel *evsel, int idx) | |
401 | { | |
402 | bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus); | |
403 | ||
404 | if (per_cpu_mmaps) | |
405 | return perf_evlist__enable_event_cpu(evlist, evsel, idx); | |
406 | else | |
407 | return perf_evlist__enable_event_thread(evlist, evsel, idx); | |
408 | } | |
409 | ||
ad6765dd | 410 | int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) |
5c581041 | 411 | { |
b3a319d5 NK |
412 | int nr_cpus = cpu_map__nr(evlist->cpus); |
413 | int nr_threads = thread_map__nr(evlist->threads); | |
bf8e8f4b AH |
414 | int nfds = 0; |
415 | struct perf_evsel *evsel; | |
416 | ||
cba9b847 | 417 | evlist__for_each(evlist, evsel) { |
bf8e8f4b AH |
418 | if (evsel->system_wide) |
419 | nfds += nr_cpus; | |
420 | else | |
421 | nfds += nr_cpus * nr_threads; | |
422 | } | |
423 | ||
1b85337d ACM |
424 | if (fdarray__available_entries(&evlist->pollfd) < nfds && |
425 | fdarray__grow(&evlist->pollfd, nfds) < 0) | |
ad6765dd ACM |
426 | return -ENOMEM; |
427 | ||
428 | return 0; | |
5c581041 | 429 | } |
70082dd9 | 430 | |
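/*
 * Sizing example for the loop above (illustrative numbers): with 4 CPUs in
 * the cpu map, 2 threads in the thread map, one ordinary evsel and one
 * system_wide evsel, nfds = 4 * 2 + 4 = 12, and the pollfd array is grown
 * only if it cannot already hold that many entries.
 */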
e4b356b5 ACM |
431 | static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx) |
432 | { | |
433 | int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP); | |
434 | /* | |
435 | * Save the idx so that when we filter out fds POLLHUP'ed we can | |
436 | * close the associated evlist->mmap[] entry. | |
437 | */ | |
438 | if (pos >= 0) { | |
439 | evlist->pollfd.priv[pos].idx = idx; | |
440 | ||
441 | fcntl(fd, F_SETFL, O_NONBLOCK); | |
442 | } | |
443 | ||
444 | return pos; | |
445 | } | |
446 | ||
ad6765dd | 447 | int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd) |
70082dd9 | 448 | { |
e4b356b5 ACM |
449 | return __perf_evlist__add_pollfd(evlist, fd, -1); |
450 | } | |
451 | ||
452 | static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd) | |
453 | { | |
454 | struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd); | |
1b85337d | 455 | |
e4b356b5 | 456 | perf_evlist__mmap_put(evlist, fda->priv[fd].idx); |
70082dd9 | 457 | } |
70db7533 | 458 | |
1ddec7f0 ACM |
459 | int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask) |
460 | { | |
e4b356b5 ACM |
461 | return fdarray__filter(&evlist->pollfd, revents_and_mask, |
462 | perf_evlist__munmap_filtered); | |
1ddec7f0 ACM |
463 | } |
464 | ||
f66a889d ACM |
465 | int perf_evlist__poll(struct perf_evlist *evlist, int timeout) |
466 | { | |
1b85337d | 467 | return fdarray__poll(&evlist->pollfd, timeout); |
f66a889d ACM |
468 | } |
469 | ||
a91e5431 ACM |
470 | static void perf_evlist__id_hash(struct perf_evlist *evlist, |
471 | struct perf_evsel *evsel, | |
472 | int cpu, int thread, u64 id) | |
3d3b5e95 ACM |
473 | { |
474 | int hash; | |
475 | struct perf_sample_id *sid = SID(evsel, cpu, thread); | |
476 | ||
477 | sid->id = id; | |
478 | sid->evsel = evsel; | |
479 | hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS); | |
480 | hlist_add_head(&sid->node, &evlist->heads[hash]); | |
481 | } | |
482 | ||
a91e5431 ACM |
483 | void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, |
484 | int cpu, int thread, u64 id) | |
485 | { | |
486 | perf_evlist__id_hash(evlist, evsel, cpu, thread, id); | |
487 | evsel->id[evsel->ids++] = id; | |
488 | } | |
489 | ||
490 | static int perf_evlist__id_add_fd(struct perf_evlist *evlist, | |
491 | struct perf_evsel *evsel, | |
492 | int cpu, int thread, int fd) | |
f8a95309 | 493 | { |
f8a95309 | 494 | u64 read_data[4] = { 0, }; |
3d3b5e95 | 495 | int id_idx = 1; /* The first entry is the counter value */ |
e2b5abe0 JO |
496 | u64 id; |
497 | int ret; | |
498 | ||
499 | ret = ioctl(fd, PERF_EVENT_IOC_ID, &id); | |
500 | if (!ret) | |
501 | goto add; | |
502 | ||
503 | if (errno != ENOTTY) | |
504 | return -1; | |
505 | ||
506 | /* Legacy way to get event id.. All hail to old kernels! */ | |
f8a95309 | 507 | |
c4861afe JO |
508 | /* |
509 | * This way does not work with group format read, so bail | |
510 | * out in that case. | |
511 | */ | |
512 | if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP) | |
513 | return -1; | |
514 | ||
f8a95309 ACM |
515 | if (!(evsel->attr.read_format & PERF_FORMAT_ID) || |
516 | read(fd, &read_data, sizeof(read_data)) == -1) | |
517 | return -1; | |
518 | ||
519 | if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | |
520 | ++id_idx; | |
521 | if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | |
522 | ++id_idx; | |
523 | ||
e2b5abe0 JO |
524 | id = read_data[id_idx]; |
525 | ||
526 | add: | |
527 | perf_evlist__id_add(evlist, evsel, cpu, thread, id); | |
f8a95309 ACM |
528 | return 0; |
529 | } | |
530 | ||
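/*
 * Layout assumed by the legacy fallback above, mirroring the read_format
 * checks in the function: read() fills read_data[] as
 * { value, time_enabled?, time_running?, id }, the two time fields being
 * present only when the matching PERF_FORMAT_TOTAL_TIME_* bit is set.
 * That is why id_idx starts at 1 and is bumped once per enabled field
 * before read_data[id_idx] is taken as the event id.
 */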
3c659eed AH |
531 | static void perf_evlist__set_sid_idx(struct perf_evlist *evlist, |
532 | struct perf_evsel *evsel, int idx, int cpu, | |
533 | int thread) | |
534 | { | |
535 | struct perf_sample_id *sid = SID(evsel, cpu, thread); | |
536 | sid->idx = idx; | |
537 | if (evlist->cpus && cpu >= 0) | |
538 | sid->cpu = evlist->cpus->map[cpu]; | |
539 | else | |
540 | sid->cpu = -1; | |
541 | if (!evsel->system_wide && evlist->threads && thread >= 0) | |
542 | sid->tid = evlist->threads->map[thread]; | |
543 | else | |
544 | sid->tid = -1; | |
545 | } | |
546 | ||
932a3594 | 547 | struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id) |
70db7533 ACM |
548 | { |
549 | struct hlist_head *head; | |
70db7533 ACM |
550 | struct perf_sample_id *sid; |
551 | int hash; | |
552 | ||
70db7533 ACM |
553 | hash = hash_64(id, PERF_EVLIST__HLIST_BITS); |
554 | head = &evlist->heads[hash]; | |
555 | ||
b67bfe0d | 556 | hlist_for_each_entry(sid, head, node) |
70db7533 | 557 | if (sid->id == id) |
932a3594 JO |
558 | return sid; |
559 | ||
560 | return NULL; | |
561 | } | |
562 | ||
563 | struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) | |
564 | { | |
565 | struct perf_sample_id *sid; | |
566 | ||
567 | if (evlist->nr_entries == 1) | |
568 | return perf_evlist__first(evlist); | |
569 | ||
570 | sid = perf_evlist__id2sid(evlist, id); | |
571 | if (sid) | |
572 | return sid->evsel; | |
30e68bcc NK |
573 | |
574 | if (!perf_evlist__sample_id_all(evlist)) | |
0c21f736 | 575 | return perf_evlist__first(evlist); |
30e68bcc | 576 | |
70db7533 ACM |
577 | return NULL; |
578 | } | |
04391deb | 579 | |
75562573 AH |
580 | static int perf_evlist__event2id(struct perf_evlist *evlist, |
581 | union perf_event *event, u64 *id) | |
582 | { | |
583 | const u64 *array = event->sample.array; | |
584 | ssize_t n; | |
585 | ||
586 | n = (event->header.size - sizeof(event->header)) >> 3; | |
587 | ||
588 | if (event->header.type == PERF_RECORD_SAMPLE) { | |
589 | if (evlist->id_pos >= n) | |
590 | return -1; | |
591 | *id = array[evlist->id_pos]; | |
592 | } else { | |
593 | if (evlist->is_pos > n) | |
594 | return -1; | |
595 | n -= evlist->is_pos; | |
596 | *id = array[n]; | |
597 | } | |
598 | return 0; | |
599 | } | |
600 | ||
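/*
 * Positional convention relied on above (sketch): with
 * PERF_SAMPLE_IDENTIFIER set, the id is the first u64 of a
 * PERF_RECORD_SAMPLE body (id_pos == 0) and the last u64 of any other
 * record carrying sample_id_all data (is_pos counted back from the end),
 * so both branches can locate it without parsing the whole event.
 */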
601 | static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist, | |
602 | union perf_event *event) | |
603 | { | |
98be6966 | 604 | struct perf_evsel *first = perf_evlist__first(evlist); |
75562573 AH |
605 | struct hlist_head *head; |
606 | struct perf_sample_id *sid; | |
607 | int hash; | |
608 | u64 id; | |
609 | ||
610 | if (evlist->nr_entries == 1) | |
98be6966 AH |
611 | return first; |
612 | ||
613 | if (!first->attr.sample_id_all && | |
614 | event->header.type != PERF_RECORD_SAMPLE) | |
615 | return first; | |
75562573 AH |
616 | |
617 | if (perf_evlist__event2id(evlist, event, &id)) | |
618 | return NULL; | |
619 | ||
620 | /* Synthesized events have an id of zero */ | |
621 | if (!id) | |
98be6966 | 622 | return first; |
75562573 AH |
623 | |
624 | hash = hash_64(id, PERF_EVLIST__HLIST_BITS); | |
625 | head = &evlist->heads[hash]; | |
626 | ||
627 | hlist_for_each_entry(sid, head, node) { | |
628 | if (sid->id == id) | |
629 | return sid->evsel; | |
630 | } | |
631 | return NULL; | |
632 | } | |
633 | ||
aece948f | 634 | union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) |
04391deb | 635 | { |
aece948f | 636 | struct perf_mmap *md = &evlist->mmap[idx]; |
7b8283b5 DA |
637 | u64 head = perf_mmap__read_head(md); |
638 | u64 old = md->prev; | |
04391deb | 639 | unsigned char *data = md->base + page_size; |
8115d60c | 640 | union perf_event *event = NULL; |
04391deb | 641 | |
7bb41152 | 642 | if (evlist->overwrite) { |
04391deb | 643 | /* |
7bb41152 ACM |
644 | * If we're further behind than half the buffer, there's a chance |
645 | * the writer will bite our tail and mess up the samples under us. | |
646 | * | |
647 | * If we somehow ended up ahead of the head, we got messed up. | |
648 | * | |
649 | * In either case, truncate and restart at head. | |
04391deb | 650 | */ |
7bb41152 ACM |
651 | int diff = head - old; |
652 | if (diff > md->mask / 2 || diff < 0) { | |
653 | fprintf(stderr, "WARNING: failed to keep up with mmap data.\n"); | |
654 | ||
655 | /* | |
656 | * head points to a known good entry, start there. | |
657 | */ | |
658 | old = head; | |
659 | } | |
04391deb ACM |
660 | } |
661 | ||
662 | if (old != head) { | |
663 | size_t size; | |
664 | ||
8115d60c | 665 | event = (union perf_event *)&data[old & md->mask]; |
04391deb ACM |
666 | size = event->header.size; |
667 | ||
668 | /* | |
669 | * Event straddles the mmap boundary -- header should always | |
670 | * be inside due to u64 alignment of output. | |
671 | */ | |
672 | if ((old & md->mask) + size != ((old + size) & md->mask)) { | |
673 | unsigned int offset = old; | |
674 | unsigned int len = min(sizeof(*event), size), cpy; | |
a65cb4b9 | 675 | void *dst = md->event_copy; |
04391deb ACM |
676 | |
677 | do { | |
678 | cpy = min(md->mask + 1 - (offset & md->mask), len); | |
679 | memcpy(dst, &data[offset & md->mask], cpy); | |
680 | offset += cpy; | |
681 | dst += cpy; | |
682 | len -= cpy; | |
683 | } while (len); | |
684 | ||
a65cb4b9 | 685 | event = (union perf_event *) md->event_copy; |
04391deb ACM |
686 | } |
687 | ||
688 | old += size; | |
689 | } | |
690 | ||
691 | md->prev = old; | |
7bb41152 | 692 | |
04391deb ACM |
693 | return event; |
694 | } | |
f8a95309 | 695 | |
82396986 ACM |
696 | static bool perf_mmap__empty(struct perf_mmap *md) |
697 | { | |
b72e74d1 | 698 | return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base; |
82396986 ACM |
699 | } |
700 | ||
701 | static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx) | |
702 | { | |
7143849a | 703 | atomic_inc(&evlist->mmap[idx].refcnt); |
82396986 ACM |
704 | } |
705 | ||
706 | static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx) | |
707 | { | |
7143849a | 708 | BUG_ON(atomic_read(&evlist->mmap[idx].refcnt) == 0); |
82396986 | 709 | |
7143849a | 710 | if (atomic_dec_and_test(&evlist->mmap[idx].refcnt)) |
82396986 ACM |
711 | __perf_evlist__munmap(evlist, idx); |
712 | } | |
713 | ||
8e50d384 ZZ |
714 | void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx) |
715 | { | |
82396986 ACM |
716 | struct perf_mmap *md = &evlist->mmap[idx]; |
717 | ||
8e50d384 | 718 | if (!evlist->overwrite) { |
7b8283b5 | 719 | u64 old = md->prev; |
8e50d384 ZZ |
720 | |
721 | perf_mmap__write_tail(md, old); | |
722 | } | |
82396986 | 723 | |
7143849a | 724 | if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md)) |
82396986 | 725 | perf_evlist__mmap_put(evlist, idx); |
8e50d384 ZZ |
726 | } |
727 | ||
718c602d AH |
728 | int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused, |
729 | struct auxtrace_mmap_params *mp __maybe_unused, | |
730 | void *userpg __maybe_unused, | |
731 | int fd __maybe_unused) | |
732 | { | |
733 | return 0; | |
734 | } | |
735 | ||
736 | void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused) | |
737 | { | |
738 | } | |
739 | ||
740 | void __weak auxtrace_mmap_params__init( | |
741 | struct auxtrace_mmap_params *mp __maybe_unused, | |
742 | off_t auxtrace_offset __maybe_unused, | |
743 | unsigned int auxtrace_pages __maybe_unused, | |
744 | bool auxtrace_overwrite __maybe_unused) | |
745 | { | |
746 | } | |
747 | ||
748 | void __weak auxtrace_mmap_params__set_idx( | |
749 | struct auxtrace_mmap_params *mp __maybe_unused, | |
750 | struct perf_evlist *evlist __maybe_unused, | |
751 | int idx __maybe_unused, | |
752 | bool per_cpu __maybe_unused) | |
753 | { | |
754 | } | |
755 | ||
93edcbd9 AH |
756 | static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx) |
757 | { | |
758 | if (evlist->mmap[idx].base != NULL) { | |
759 | munmap(evlist->mmap[idx].base, evlist->mmap_len); | |
760 | evlist->mmap[idx].base = NULL; | |
7143849a | 761 | atomic_set(&evlist->mmap[idx].refcnt, 0); |
93edcbd9 | 762 | } |
718c602d | 763 | auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap); |
93edcbd9 AH |
764 | } |
765 | ||
7e2ed097 | 766 | void perf_evlist__munmap(struct perf_evlist *evlist) |
f8a95309 | 767 | { |
aece948f | 768 | int i; |
f8a95309 | 769 | |
983874d1 ACM |
770 | if (evlist->mmap == NULL) |
771 | return; | |
772 | ||
93edcbd9 AH |
773 | for (i = 0; i < evlist->nr_mmaps; i++) |
774 | __perf_evlist__munmap(evlist, i); | |
aece948f | 775 | |
04662523 | 776 | zfree(&evlist->mmap); |
f8a95309 ACM |
777 | } |
778 | ||
806fb630 | 779 | static int perf_evlist__alloc_mmap(struct perf_evlist *evlist) |
f8a95309 | 780 | { |
a14bb7a6 | 781 | evlist->nr_mmaps = cpu_map__nr(evlist->cpus); |
ec1e7e43 | 782 | if (cpu_map__empty(evlist->cpus)) |
b3a319d5 | 783 | evlist->nr_mmaps = thread_map__nr(evlist->threads); |
aece948f | 784 | evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); |
f8a95309 ACM |
785 | return evlist->mmap != NULL ? 0 : -ENOMEM; |
786 | } | |
787 | ||
a8a8f3eb AH |
788 | struct mmap_params { |
789 | int prot; | |
790 | int mask; | |
718c602d | 791 | struct auxtrace_mmap_params auxtrace_mp; |
a8a8f3eb AH |
792 | }; |
793 | ||
794 | static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx, | |
795 | struct mmap_params *mp, int fd) | |
f8a95309 | 796 | { |
82396986 ACM |
797 | /* |
798 | * The last one will be done at perf_evlist__mmap_consume(), so that we | |
799 | * make sure we don't prevent tools from consuming every last event in | |
800 | * the ring buffer. | |
801 | * | |
802 | * I.e. we can get the POLLHUP meaning that the fd doesn't exist | |
803 | * anymore, but the last events for it are still in the ring buffer, | |
804 | * waiting to be consumed. | |
805 | * | |
806 | * Tools can choose to ignore this at their own discretion, but the | |
807 | * evlist layer can't just drop it when filtering events in | |
808 | * perf_evlist__filter_pollfd(). | |
809 | */ | |
7143849a | 810 | atomic_set(&evlist->mmap[idx].refcnt, 2); |
aece948f | 811 | evlist->mmap[idx].prev = 0; |
a8a8f3eb AH |
812 | evlist->mmap[idx].mask = mp->mask; |
813 | evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot, | |
f8a95309 | 814 | MAP_SHARED, fd, 0); |
301b195d | 815 | if (evlist->mmap[idx].base == MAP_FAILED) { |
02635965 AH |
816 | pr_debug2("failed to mmap perf event ring buffer, error %d\n", |
817 | errno); | |
301b195d | 818 | evlist->mmap[idx].base = NULL; |
f8a95309 | 819 | return -1; |
301b195d | 820 | } |
ad6765dd | 821 | |
718c602d AH |
822 | if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap, |
823 | &mp->auxtrace_mp, evlist->mmap[idx].base, fd)) | |
824 | return -1; | |
825 | ||
f8a95309 ACM |
826 | return 0; |
827 | } | |
828 | ||
04e21314 | 829 | static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, |
a8a8f3eb AH |
830 | struct mmap_params *mp, int cpu, |
831 | int thread, int *output) | |
aece948f ACM |
832 | { |
833 | struct perf_evsel *evsel; | |
04e21314 | 834 | |
0050f7aa | 835 | evlist__for_each(evlist, evsel) { |
bf8e8f4b AH |
836 | int fd; |
837 | ||
838 | if (evsel->system_wide && thread) | |
839 | continue; | |
840 | ||
841 | fd = FD(evsel, cpu, thread); | |
04e21314 AH |
842 | |
843 | if (*output == -1) { | |
844 | *output = fd; | |
a8a8f3eb | 845 | if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0) |
04e21314 AH |
846 | return -1; |
847 | } else { | |
848 | if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0) | |
849 | return -1; | |
82396986 ACM |
850 | |
851 | perf_evlist__mmap_get(evlist, idx); | |
04e21314 AH |
852 | } |
853 | ||
f90d194a AH |
854 | /* |
855 | * The system_wide flag causes a selected event to be opened | |
856 | * always without a pid. Consequently it will never get a | |
857 | * POLLHUP, but it is used for tracking in combination with | |
858 | * other events, so it should not need to be polled anyway. | |
859 | * Therefore don't add it for polling. | |
860 | */ | |
861 | if (!evsel->system_wide && | |
862 | __perf_evlist__add_pollfd(evlist, fd, idx) < 0) { | |
82396986 | 863 | perf_evlist__mmap_put(evlist, idx); |
ad6765dd | 864 | return -1; |
82396986 | 865 | } |
033fa713 | 866 | |
3c659eed AH |
867 | if (evsel->attr.read_format & PERF_FORMAT_ID) { |
868 | if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread, | |
869 | fd) < 0) | |
870 | return -1; | |
871 | perf_evlist__set_sid_idx(evlist, evsel, idx, cpu, | |
872 | thread); | |
873 | } | |
04e21314 AH |
874 | } |
875 | ||
876 | return 0; | |
877 | } | |
878 | ||
a8a8f3eb AH |
879 | static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, |
880 | struct mmap_params *mp) | |
04e21314 | 881 | { |
aece948f | 882 | int cpu, thread; |
b3a319d5 NK |
883 | int nr_cpus = cpu_map__nr(evlist->cpus); |
884 | int nr_threads = thread_map__nr(evlist->threads); | |
aece948f | 885 | |
e3e1a54f | 886 | pr_debug2("perf event ring buffer mmapped per cpu\n"); |
b3a319d5 | 887 | for (cpu = 0; cpu < nr_cpus; cpu++) { |
aece948f ACM |
888 | int output = -1; |
889 | ||
718c602d AH |
890 | auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu, |
891 | true); | |
892 | ||
b3a319d5 | 893 | for (thread = 0; thread < nr_threads; thread++) { |
a8a8f3eb AH |
894 | if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu, |
895 | thread, &output)) | |
04e21314 | 896 | goto out_unmap; |
aece948f ACM |
897 | } |
898 | } | |
899 | ||
900 | return 0; | |
901 | ||
902 | out_unmap: | |
93edcbd9 AH |
903 | for (cpu = 0; cpu < nr_cpus; cpu++) |
904 | __perf_evlist__munmap(evlist, cpu); | |
aece948f ACM |
905 | return -1; |
906 | } | |
907 | ||
a8a8f3eb AH |
908 | static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, |
909 | struct mmap_params *mp) | |
aece948f | 910 | { |
aece948f | 911 | int thread; |
b3a319d5 | 912 | int nr_threads = thread_map__nr(evlist->threads); |
aece948f | 913 | |
e3e1a54f | 914 | pr_debug2("perf event ring buffer mmapped per thread\n"); |
b3a319d5 | 915 | for (thread = 0; thread < nr_threads; thread++) { |
aece948f ACM |
916 | int output = -1; |
917 | ||
718c602d AH |
918 | auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread, |
919 | false); | |
920 | ||
a8a8f3eb AH |
921 | if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread, |
922 | &output)) | |
04e21314 | 923 | goto out_unmap; |
aece948f ACM |
924 | } |
925 | ||
926 | return 0; | |
927 | ||
928 | out_unmap: | |
93edcbd9 AH |
929 | for (thread = 0; thread < nr_threads; thread++) |
930 | __perf_evlist__munmap(evlist, thread); | |
aece948f ACM |
931 | return -1; |
932 | } | |
933 | ||
994a1f78 JO |
934 | static size_t perf_evlist__mmap_size(unsigned long pages) |
935 | { | |
8185e881 ACM |
936 | if (pages == UINT_MAX) { |
937 | int max; | |
938 | ||
939 | if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) { | |
940 | /* | |
941 | * Pick a value that was good once upon a time, i.e. things look | |
942 | * strange since we can't read a sysctl value, but let's not | |
943 | * die yet... | |
944 | */ | |
945 | max = 512; | |
946 | } else { | |
947 | max -= (page_size / 1024); | |
948 | } | |
949 | ||
950 | pages = (max * 1024) / page_size; | |
1be300f4 ACM |
951 | if (!is_power_of_2(pages)) |
952 | pages = rounddown_pow_of_two(pages); | |
8185e881 | 953 | } else if (!is_power_of_2(pages)) |
994a1f78 JO |
954 | return 0; |
955 | ||
956 | return (pages + 1) * page_size; | |
957 | } | |
958 | ||
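/*
 * Worked example for the size above, assuming 4096-byte pages: pages == 128
 * yields (128 + 1) * 4096 bytes, i.e. 128 data pages plus one page for the
 * perf_event_mmap_page control header. The data mask derived from it later
 * (mmap_len - page_size - 1) is then 128 * 4096 - 1.
 */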
33c2dcfd DA |
959 | static long parse_pages_arg(const char *str, unsigned long min, |
960 | unsigned long max) | |
994a1f78 | 961 | { |
2fbe4abe | 962 | unsigned long pages, val; |
27050f53 JO |
963 | static struct parse_tag tags[] = { |
964 | { .tag = 'B', .mult = 1 }, | |
965 | { .tag = 'K', .mult = 1 << 10 }, | |
966 | { .tag = 'M', .mult = 1 << 20 }, | |
967 | { .tag = 'G', .mult = 1 << 30 }, | |
968 | { .tag = 0 }, | |
969 | }; | |
994a1f78 | 970 | |
8973504b | 971 | if (str == NULL) |
33c2dcfd | 972 | return -EINVAL; |
8973504b | 973 | |
27050f53 | 974 | val = parse_tag_value(str, tags); |
2fbe4abe | 975 | if (val != (unsigned long) -1) { |
27050f53 JO |
976 | /* we got file size value */ |
977 | pages = PERF_ALIGN(val, page_size) / page_size; | |
27050f53 JO |
978 | } else { |
979 | /* we got pages count value */ | |
980 | char *eptr; | |
981 | pages = strtoul(str, &eptr, 10); | |
33c2dcfd DA |
982 | if (*eptr != '\0') |
983 | return -EINVAL; | |
994a1f78 JO |
984 | } |
985 | ||
2bcab6c1 | 986 | if (pages == 0 && min == 0) { |
33c2dcfd | 987 | /* leave number of pages at 0 */ |
1dbfa938 | 988 | } else if (!is_power_of_2(pages)) { |
33c2dcfd | 989 | /* round pages up to next power of 2 */ |
91529834 | 990 | pages = roundup_pow_of_two(pages); |
1dbfa938 AH |
991 | if (!pages) |
992 | return -EINVAL; | |
9639837e DA |
993 | pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n", |
994 | pages * page_size, pages); | |
2fbe4abe AH |
995 | } |
996 | ||
33c2dcfd DA |
997 | if (pages > max) |
998 | return -EINVAL; | |
999 | ||
1000 | return pages; | |
1001 | } | |
1002 | ||
e9db1310 | 1003 | int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str) |
33c2dcfd | 1004 | { |
33c2dcfd DA |
1005 | unsigned long max = UINT_MAX; |
1006 | long pages; | |
1007 | ||
f5ae9c42 | 1008 | if (max > SIZE_MAX / page_size) |
33c2dcfd DA |
1009 | max = SIZE_MAX / page_size; |
1010 | ||
1011 | pages = parse_pages_arg(str, 1, max); | |
1012 | if (pages < 0) { | |
1013 | pr_err("Invalid argument for --mmap_pages/-m\n"); | |
994a1f78 JO |
1014 | return -1; |
1015 | } | |
1016 | ||
1017 | *mmap_pages = pages; | |
1018 | return 0; | |
1019 | } | |
1020 | ||
e9db1310 AH |
1021 | int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, |
1022 | int unset __maybe_unused) | |
1023 | { | |
1024 | return __perf_evlist__parse_mmap_pages(opt->value, str); | |
1025 | } | |
1026 | ||
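/*
 * A usage sketch for the parsing above; the values and the function name
 * are illustrative and assume 4 KiB pages.
 */
static void example_parse_mmap_pages(void)
{
	unsigned int pages = 0;

	/* size suffixes are accepted: "512K" -> 512 KiB -> 128 pages */
	if (__perf_evlist__parse_mmap_pages(&pages, "512K") == 0)
		pr_debug("mmap pages: %u\n", pages);

	/* bare numbers are page counts, rounded up to a power of two: "100" -> 128 */
	if (__perf_evlist__parse_mmap_pages(&pages, "100") == 0)
		pr_debug("mmap pages: %u\n", pages);
}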
c83fa7f2 | 1027 | /** |
718c602d | 1028 | * perf_evlist__mmap_ex - Create mmaps to receive events. |
c83fa7f2 AH |
1029 | * @evlist: list of events |
1030 | * @pages: map length in pages | |
1031 | * @overwrite: overwrite older events? | |
718c602d AH |
1032 | * @auxtrace_pages: auxtrace map length in pages |
1033 | * @auxtrace_overwrite: overwrite older auxtrace data? | |
f8a95309 | 1034 | * |
c83fa7f2 AH |
1035 | * If @overwrite is %false the user needs to signal event consumption using |
1036 | * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this | |
1037 | * automatically. | |
7e2ed097 | 1038 | * |
718c602d AH |
1039 | * Similarly, if @auxtrace_overwrite is %false the user needs to signal data |
1040 | * consumption using auxtrace_mmap__write_tail(). | |
1041 | * | |
c83fa7f2 | 1042 | * Return: %0 on success, negative error code otherwise. |
f8a95309 | 1043 | */ |
718c602d AH |
1044 | int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, |
1045 | bool overwrite, unsigned int auxtrace_pages, | |
1046 | bool auxtrace_overwrite) | |
f8a95309 | 1047 | { |
aece948f | 1048 | struct perf_evsel *evsel; |
7e2ed097 ACM |
1049 | const struct cpu_map *cpus = evlist->cpus; |
1050 | const struct thread_map *threads = evlist->threads; | |
a8a8f3eb AH |
1051 | struct mmap_params mp = { |
1052 | .prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), | |
1053 | }; | |
50a682ce | 1054 | |
7e2ed097 | 1055 | if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0) |
f8a95309 ACM |
1056 | return -ENOMEM; |
1057 | ||
1b85337d | 1058 | if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0) |
f8a95309 ACM |
1059 | return -ENOMEM; |
1060 | ||
1061 | evlist->overwrite = overwrite; | |
994a1f78 | 1062 | evlist->mmap_len = perf_evlist__mmap_size(pages); |
2af68ef5 | 1063 | pr_debug("mmap size %zuB\n", evlist->mmap_len); |
a8a8f3eb | 1064 | mp.mask = evlist->mmap_len - page_size - 1; |
f8a95309 | 1065 | |
718c602d AH |
1066 | auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len, |
1067 | auxtrace_pages, auxtrace_overwrite); | |
1068 | ||
0050f7aa | 1069 | evlist__for_each(evlist, evsel) { |
f8a95309 | 1070 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && |
a91e5431 | 1071 | evsel->sample_id == NULL && |
a14bb7a6 | 1072 | perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0) |
f8a95309 | 1073 | return -ENOMEM; |
f8a95309 ACM |
1074 | } |
1075 | ||
ec1e7e43 | 1076 | if (cpu_map__empty(cpus)) |
a8a8f3eb | 1077 | return perf_evlist__mmap_per_thread(evlist, &mp); |
f8a95309 | 1078 | |
a8a8f3eb | 1079 | return perf_evlist__mmap_per_cpu(evlist, &mp); |
f8a95309 | 1080 | } |
7e2ed097 | 1081 | |
718c602d AH |
1082 | int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, |
1083 | bool overwrite) | |
1084 | { | |
1085 | return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false); | |
1086 | } | |
1087 | ||
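/*
 * Consumer loop sketch for the mmap interface above; illustrative, and
 * assumes the evlist was opened and mmapped with overwrite == false.
 */
static void example_consume(struct perf_evlist *evlist)
{
	union perf_event *event;
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			/* ... handle the event, e.g. via perf_evlist__parse_sample() ... */
			perf_evlist__mmap_consume(evlist, i);
		}
	}
}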
602ad878 | 1088 | int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) |
7e2ed097 | 1089 | { |
b809ac10 NK |
1090 | evlist->threads = thread_map__new_str(target->pid, target->tid, |
1091 | target->uid); | |
7e2ed097 ACM |
1092 | |
1093 | if (evlist->threads == NULL) | |
1094 | return -1; | |
1095 | ||
9c105fbc | 1096 | if (target__uses_dummy_map(target)) |
d1cb9fce | 1097 | evlist->cpus = cpu_map__dummy_new(); |
879d77d0 NK |
1098 | else |
1099 | evlist->cpus = cpu_map__new(target->cpu_list); | |
7e2ed097 ACM |
1100 | |
1101 | if (evlist->cpus == NULL) | |
1102 | goto out_delete_threads; | |
1103 | ||
1104 | return 0; | |
1105 | ||
1106 | out_delete_threads: | |
1107 | thread_map__delete(evlist->threads); | |
b2e19a93 | 1108 | evlist->threads = NULL; |
7e2ed097 ACM |
1109 | return -1; |
1110 | } | |
1111 | ||
23d4aad4 | 1112 | int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel) |
0a102479 | 1113 | { |
0a102479 | 1114 | struct perf_evsel *evsel; |
745cefc5 ACM |
1115 | int err = 0; |
1116 | const int ncpus = cpu_map__nr(evlist->cpus), | |
b3a319d5 | 1117 | nthreads = thread_map__nr(evlist->threads); |
0a102479 | 1118 | |
0050f7aa | 1119 | evlist__for_each(evlist, evsel) { |
745cefc5 | 1120 | if (evsel->filter == NULL) |
0a102479 | 1121 | continue; |
745cefc5 ACM |
1122 | |
1123 | err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter); | |
23d4aad4 ACM |
1124 | if (err) { |
1125 | *err_evsel = evsel; | |
745cefc5 | 1126 | break; |
23d4aad4 | 1127 | } |
0a102479 FW |
1128 | } |
1129 | ||
745cefc5 ACM |
1130 | return err; |
1131 | } | |
1132 | ||
1133 | int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter) | |
1134 | { | |
1135 | struct perf_evsel *evsel; | |
1136 | int err = 0; | |
1137 | const int ncpus = cpu_map__nr(evlist->cpus), | |
b3a319d5 | 1138 | nthreads = thread_map__nr(evlist->threads); |
745cefc5 | 1139 | |
0050f7aa | 1140 | evlist__for_each(evlist, evsel) { |
745cefc5 ACM |
1141 | err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter); |
1142 | if (err) | |
1143 | break; | |
1144 | } | |
1145 | ||
1146 | return err; | |
0a102479 | 1147 | } |
74429964 | 1148 | |
be199ada | 1149 | int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids) |
cfd70a26 ACM |
1150 | { |
1151 | char *filter; | |
be199ada ACM |
1152 | int ret = -1; |
1153 | size_t i; | |
cfd70a26 | 1154 | |
be199ada ACM |
1155 | for (i = 0; i < npids; ++i) { |
1156 | if (i == 0) { | |
1157 | if (asprintf(&filter, "common_pid != %d", pids[i]) < 0) | |
1158 | return -1; | |
1159 | } else { | |
1160 | char *tmp; | |
1161 | ||
1162 | if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0) | |
1163 | goto out_free; | |
1164 | ||
1165 | free(filter); | |
1166 | filter = tmp; | |
1167 | } | |
1168 | } | |
cfd70a26 ACM |
1169 | |
1170 | ret = perf_evlist__set_filter(evlist, filter); | |
be199ada | 1171 | out_free: |
cfd70a26 ACM |
1172 | free(filter); |
1173 | return ret; | |
1174 | } | |
1175 | ||
be199ada ACM |
1176 | int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid) |
1177 | { | |
1178 | return perf_evlist__set_filter_pids(evlist, 1, &pid); | |
1179 | } | |
1180 | ||
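/*
 * Usage sketch: a tracing tool can keep its own activity out of the trace
 * by filtering on its pid, e.g.
 *
 *	err = perf_evlist__set_filter_pid(evlist, getpid());
 *
 * which applies a "common_pid != <pid>" filter to every evsel via the
 * helpers above.
 */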
0c21f736 | 1181 | bool perf_evlist__valid_sample_type(struct perf_evlist *evlist) |
74429964 | 1182 | { |
75562573 | 1183 | struct perf_evsel *pos; |
c2a70653 | 1184 | |
75562573 AH |
1185 | if (evlist->nr_entries == 1) |
1186 | return true; | |
1187 | ||
1188 | if (evlist->id_pos < 0 || evlist->is_pos < 0) | |
1189 | return false; | |
1190 | ||
0050f7aa | 1191 | evlist__for_each(evlist, pos) { |
75562573 AH |
1192 | if (pos->id_pos != evlist->id_pos || |
1193 | pos->is_pos != evlist->is_pos) | |
c2a70653 | 1194 | return false; |
74429964 FW |
1195 | } |
1196 | ||
c2a70653 | 1197 | return true; |
74429964 FW |
1198 | } |
1199 | ||
75562573 | 1200 | u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist) |
c2a70653 | 1201 | { |
75562573 AH |
1202 | struct perf_evsel *evsel; |
1203 | ||
1204 | if (evlist->combined_sample_type) | |
1205 | return evlist->combined_sample_type; | |
1206 | ||
0050f7aa | 1207 | evlist__for_each(evlist, evsel) |
75562573 AH |
1208 | evlist->combined_sample_type |= evsel->attr.sample_type; |
1209 | ||
1210 | return evlist->combined_sample_type; | |
1211 | } | |
1212 | ||
1213 | u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist) | |
1214 | { | |
1215 | evlist->combined_sample_type = 0; | |
1216 | return __perf_evlist__combined_sample_type(evlist); | |
c2a70653 ACM |
1217 | } |
1218 | ||
9ede473c JO |
1219 | bool perf_evlist__valid_read_format(struct perf_evlist *evlist) |
1220 | { | |
1221 | struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; | |
1222 | u64 read_format = first->attr.read_format; | |
1223 | u64 sample_type = first->attr.sample_type; | |
1224 | ||
0050f7aa | 1225 | evlist__for_each(evlist, pos) { |
9ede473c JO |
1226 | if (read_format != pos->attr.read_format) |
1227 | return false; | |
1228 | } | |
1229 | ||
1230 | /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */ | |
1231 | if ((sample_type & PERF_SAMPLE_READ) && | |
1232 | !(read_format & PERF_FORMAT_ID)) { | |
1233 | return false; | |
1234 | } | |
1235 | ||
1236 | return true; | |
1237 | } | |
1238 | ||
1239 | u64 perf_evlist__read_format(struct perf_evlist *evlist) | |
1240 | { | |
1241 | struct perf_evsel *first = perf_evlist__first(evlist); | |
1242 | return first->attr.read_format; | |
1243 | } | |
1244 | ||
0c21f736 | 1245 | u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist) |
81e36bff | 1246 | { |
0c21f736 | 1247 | struct perf_evsel *first = perf_evlist__first(evlist); |
81e36bff ACM |
1248 | struct perf_sample *data; |
1249 | u64 sample_type; | |
1250 | u16 size = 0; | |
1251 | ||
81e36bff ACM |
1252 | if (!first->attr.sample_id_all) |
1253 | goto out; | |
1254 | ||
1255 | sample_type = first->attr.sample_type; | |
1256 | ||
1257 | if (sample_type & PERF_SAMPLE_TID) | |
1258 | size += sizeof(data->tid) * 2; | |
1259 | ||
1260 | if (sample_type & PERF_SAMPLE_TIME) | |
1261 | size += sizeof(data->time); | |
1262 | ||
1263 | if (sample_type & PERF_SAMPLE_ID) | |
1264 | size += sizeof(data->id); | |
1265 | ||
1266 | if (sample_type & PERF_SAMPLE_STREAM_ID) | |
1267 | size += sizeof(data->stream_id); | |
1268 | ||
1269 | if (sample_type & PERF_SAMPLE_CPU) | |
1270 | size += sizeof(data->cpu) * 2; | |
75562573 AH |
1271 | |
1272 | if (sample_type & PERF_SAMPLE_IDENTIFIER) | |
1273 | size += sizeof(data->id); | |
81e36bff ACM |
1274 | out: |
1275 | return size; | |
1276 | } | |
1277 | ||
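/*
 * Worked example for the computation above, assuming the usual u32 pid/tid
 * and u64 time/id fields in struct perf_sample: a sample type of
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID yields
 * 8 + 8 + 8 = 24 bytes of sample_id_all trailer on each non-sample record.
 */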
0c21f736 | 1278 | bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist) |
74429964 | 1279 | { |
0c21f736 | 1280 | struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; |
c2a70653 | 1281 | |
0050f7aa | 1282 | evlist__for_each_continue(evlist, pos) { |
c2a70653 ACM |
1283 | if (first->attr.sample_id_all != pos->attr.sample_id_all) |
1284 | return false; | |
74429964 FW |
1285 | } |
1286 | ||
c2a70653 ACM |
1287 | return true; |
1288 | } | |
1289 | ||
0c21f736 | 1290 | bool perf_evlist__sample_id_all(struct perf_evlist *evlist) |
c2a70653 | 1291 | { |
0c21f736 | 1292 | struct perf_evsel *first = perf_evlist__first(evlist); |
c2a70653 | 1293 | return first->attr.sample_id_all; |
74429964 | 1294 | } |
81cce8de ACM |
1295 | |
1296 | void perf_evlist__set_selected(struct perf_evlist *evlist, | |
1297 | struct perf_evsel *evsel) | |
1298 | { | |
1299 | evlist->selected = evsel; | |
1300 | } | |
727ab04e | 1301 | |
a74b4b66 NK |
1302 | void perf_evlist__close(struct perf_evlist *evlist) |
1303 | { | |
1304 | struct perf_evsel *evsel; | |
1305 | int ncpus = cpu_map__nr(evlist->cpus); | |
1306 | int nthreads = thread_map__nr(evlist->threads); | |
8ad9219e | 1307 | int n; |
a74b4b66 | 1308 | |
8ad9219e SE |
1309 | evlist__for_each_reverse(evlist, evsel) { |
1310 | n = evsel->cpus ? evsel->cpus->nr : ncpus; | |
1311 | perf_evsel__close(evsel, n, nthreads); | |
1312 | } | |
a74b4b66 NK |
1313 | } |
1314 | ||
4112eb18 ACM |
1315 | static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist) |
1316 | { | |
1317 | int err = -ENOMEM; | |
1318 | ||
1319 | /* | |
1320 | * Try reading /sys/devices/system/cpu/online to get | |
1321 | * an all cpus map. | |
1322 | * | |
1323 | * FIXME: -ENOMEM is the best we can do here, the cpu_map | |
1324 | * code needs an overhaul to properly forward the | |
1325 | * error, and we may not want to do that fallback to a | |
1326 | * default cpu identity map :-\ | |
1327 | */ | |
1328 | evlist->cpus = cpu_map__new(NULL); | |
1329 | if (evlist->cpus == NULL) | |
1330 | goto out; | |
1331 | ||
1332 | evlist->threads = thread_map__new_dummy(); | |
1333 | if (evlist->threads == NULL) | |
1334 | goto out_free_cpus; | |
1335 | ||
1336 | err = 0; | |
1337 | out: | |
1338 | return err; | |
1339 | out_free_cpus: | |
1340 | cpu_map__delete(evlist->cpus); | |
1341 | evlist->cpus = NULL; | |
1342 | goto out; | |
1343 | } | |
1344 | ||
6a4bb04c | 1345 | int perf_evlist__open(struct perf_evlist *evlist) |
727ab04e | 1346 | { |
6a4bb04c | 1347 | struct perf_evsel *evsel; |
a74b4b66 | 1348 | int err; |
727ab04e | 1349 | |
4112eb18 ACM |
1350 | /* |
1351 | * Default: one fd per CPU, all threads, aka systemwide | |
1352 | * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL | |
1353 | */ | |
1354 | if (evlist->threads == NULL && evlist->cpus == NULL) { | |
1355 | err = perf_evlist__create_syswide_maps(evlist); | |
1356 | if (err < 0) | |
1357 | goto out_err; | |
1358 | } | |
1359 | ||
733cd2fe AH |
1360 | perf_evlist__update_id_pos(evlist); |
1361 | ||
0050f7aa | 1362 | evlist__for_each(evlist, evsel) { |
6a4bb04c | 1363 | err = perf_evsel__open(evsel, evlist->cpus, evlist->threads); |
727ab04e ACM |
1364 | if (err < 0) |
1365 | goto out_err; | |
1366 | } | |
1367 | ||
1368 | return 0; | |
1369 | out_err: | |
a74b4b66 | 1370 | perf_evlist__close(evlist); |
41c21a68 | 1371 | errno = -err; |
727ab04e ACM |
1372 | return err; |
1373 | } | |
35b9d88e | 1374 | |
602ad878 | 1375 | int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target, |
55e162ea | 1376 | const char *argv[], bool pipe_output, |
735f7e0b | 1377 | void (*exec_error)(int signo, siginfo_t *info, void *ucontext)) |
35b9d88e ACM |
1378 | { |
1379 | int child_ready_pipe[2], go_pipe[2]; | |
1380 | char bf; | |
1381 | ||
1382 | if (pipe(child_ready_pipe) < 0) { | |
1383 | perror("failed to create 'ready' pipe"); | |
1384 | return -1; | |
1385 | } | |
1386 | ||
1387 | if (pipe(go_pipe) < 0) { | |
1388 | perror("failed to create 'go' pipe"); | |
1389 | goto out_close_ready_pipe; | |
1390 | } | |
1391 | ||
1392 | evlist->workload.pid = fork(); | |
1393 | if (evlist->workload.pid < 0) { | |
1394 | perror("failed to fork"); | |
1395 | goto out_close_pipes; | |
1396 | } | |
1397 | ||
1398 | if (!evlist->workload.pid) { | |
5f1c4225 ACM |
1399 | int ret; |
1400 | ||
119fa3c9 | 1401 | if (pipe_output) |
35b9d88e ACM |
1402 | dup2(2, 1); |
1403 | ||
0817df08 DA |
1404 | signal(SIGTERM, SIG_DFL); |
1405 | ||
35b9d88e ACM |
1406 | close(child_ready_pipe[0]); |
1407 | close(go_pipe[1]); | |
1408 | fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); | |
1409 | ||
35b9d88e ACM |
1410 | /* |
1411 | * Tell the parent we're ready to go | |
1412 | */ | |
1413 | close(child_ready_pipe[1]); | |
1414 | ||
1415 | /* | |
1416 | * Wait until the parent tells us to go. | |
1417 | */ | |
5f1c4225 ACM |
1418 | ret = read(go_pipe[0], &bf, 1); |
1419 | /* | |
1420 | * The parent will ask for the execvp() to be performed by | |
1421 | * writing exactly one byte, in workload.cork_fd, usually via | |
1422 | * perf_evlist__start_workload(). | |
1423 | * | |
20f86fc1 | 1424 | * For cancelling the workload without actually running it, |
5f1c4225 ACM |
1425 | * the parent will just close workload.cork_fd, without writing |
1426 | * anything, i.e. read will return zero and we just exit() | |
1427 | * here. | |
1428 | */ | |
1429 | if (ret != 1) { | |
1430 | if (ret == -1) | |
1431 | perror("unable to read pipe"); | |
1432 | exit(ret); | |
1433 | } | |
35b9d88e ACM |
1434 | |
1435 | execvp(argv[0], (char **)argv); | |
1436 | ||
735f7e0b | 1437 | if (exec_error) { |
f33cbe72 ACM |
1438 | union sigval val; |
1439 | ||
1440 | val.sival_int = errno; | |
1441 | if (sigqueue(getppid(), SIGUSR1, val)) | |
1442 | perror(argv[0]); | |
1443 | } else | |
1444 | perror(argv[0]); | |
35b9d88e ACM |
1445 | exit(-1); |
1446 | } | |
1447 | ||
735f7e0b ACM |
1448 | if (exec_error) { |
1449 | struct sigaction act = { | |
1450 | .sa_flags = SA_SIGINFO, | |
1451 | .sa_sigaction = exec_error, | |
1452 | }; | |
1453 | sigaction(SIGUSR1, &act, NULL); | |
1454 | } | |
1455 | ||
1aaf63b1 ACM |
1456 | if (target__none(target)) { |
1457 | if (evlist->threads == NULL) { | |
1458 | fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n", | |
1459 | __func__, __LINE__); | |
1460 | goto out_close_pipes; | |
1461 | } | |
35b9d88e | 1462 | evlist->threads->map[0] = evlist->workload.pid; |
1aaf63b1 | 1463 | } |
35b9d88e ACM |
1464 | |
1465 | close(child_ready_pipe[1]); | |
1466 | close(go_pipe[0]); | |
1467 | /* | |
1468 | * wait for child to settle | |
1469 | */ | |
1470 | if (read(child_ready_pipe[0], &bf, 1) == -1) { | |
1471 | perror("unable to read pipe"); | |
1472 | goto out_close_pipes; | |
1473 | } | |
1474 | ||
bcf3145f | 1475 | fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC); |
35b9d88e ACM |
1476 | evlist->workload.cork_fd = go_pipe[1]; |
1477 | close(child_ready_pipe[0]); | |
1478 | return 0; | |
1479 | ||
1480 | out_close_pipes: | |
1481 | close(go_pipe[0]); | |
1482 | close(go_pipe[1]); | |
1483 | out_close_ready_pipe: | |
1484 | close(child_ready_pipe[0]); | |
1485 | close(child_ready_pipe[1]); | |
1486 | return -1; | |
1487 | } | |
1488 | ||
1489 | int perf_evlist__start_workload(struct perf_evlist *evlist) | |
1490 | { | |
1491 | if (evlist->workload.cork_fd > 0) { | |
b3824404 | 1492 | char bf = 0; |
bcf3145f | 1493 | int ret; |
35b9d88e ACM |
1494 | /* |
1495 | * Remove the cork, let it rip! | |
1496 | */ | |
bcf3145f NK |
1497 | ret = write(evlist->workload.cork_fd, &bf, 1); |
1498 | if (ret < 0) | |
1499 | perror("unable to write to pipe"); | |
1500 | ||
1501 | close(evlist->workload.cork_fd); | |
1502 | return ret; | |
35b9d88e ACM |
1503 | } |
1504 | ||
1505 | return 0; | |
1506 | } | |
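/*
 * End-to-end sketch of the cork mechanism above (illustrative; the function
 * name is made up and error paths are simplified): the workload is forked
 * early but blocks on go_pipe until perf_evlist__start_workload() writes
 * the single byte that uncorks it, so the counters can be set up before
 * the child reaches execvp().
 */
static int example_run_workload(struct perf_evlist *evlist,
				struct target *target, const char *argv[])
{
	if (perf_evlist__create_maps(evlist, target) < 0)
		return -1;

	if (perf_evlist__prepare_workload(evlist, target, argv, false, NULL) < 0)
		return -1;

	if (perf_evlist__open(evlist) < 0)
		return -1;

	perf_evlist__enable(evlist);

	return perf_evlist__start_workload(evlist);
}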
cb0b29e0 | 1507 | |
a3f698fe | 1508 | int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, |
0807d2d8 | 1509 | struct perf_sample *sample) |
cb0b29e0 | 1510 | { |
75562573 AH |
1511 | struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event); |
1512 | ||
1513 | if (!evsel) | |
1514 | return -EFAULT; | |
0807d2d8 | 1515 | return perf_evsel__parse_sample(evsel, event, sample); |
cb0b29e0 | 1516 | } |
78f067b3 ACM |
1517 | |
1518 | size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp) | |
1519 | { | |
1520 | struct perf_evsel *evsel; | |
1521 | size_t printed = 0; | |
1522 | ||
0050f7aa | 1523 | evlist__for_each(evlist, evsel) { |
78f067b3 ACM |
1524 | printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "", |
1525 | perf_evsel__name(evsel)); | |
1526 | } | |
1527 | ||
b2222139 | 1528 | return printed + fprintf(fp, "\n"); |
78f067b3 | 1529 | } |
6ef068cb | 1530 | |
a8f23d8f ACM |
1531 | int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused, |
1532 | int err, char *buf, size_t size) | |
1533 | { | |
1534 | int printed, value; | |
6e81c74c | 1535 | char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf)); |
a8f23d8f ACM |
1536 | |
1537 | switch (err) { | |
1538 | case EACCES: | |
1539 | case EPERM: | |
1540 | printed = scnprintf(buf, size, | |
1541 | "Error:\t%s.\n" | |
1542 | "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg); | |
1543 | ||
1a47245d | 1544 | value = perf_event_paranoid(); |
a8f23d8f ACM |
1545 | |
1546 | printed += scnprintf(buf + printed, size - printed, "\nHint:\t"); | |
1547 | ||
1548 | if (value >= 2) { | |
1549 | printed += scnprintf(buf + printed, size - printed, | |
1550 | "For your workloads it needs to be <= 1\nHint:\t"); | |
1551 | } | |
1552 | printed += scnprintf(buf + printed, size - printed, | |
5229e366 | 1553 | "For system wide tracing it needs to be set to -1.\n"); |
a8f23d8f ACM |
1554 | |
1555 | printed += scnprintf(buf + printed, size - printed, | |
5229e366 ACM |
1556 | "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n" |
1557 | "Hint:\tThe current value is %d.", value); | |
a8f23d8f ACM |
1558 | break; |
1559 | default: | |
1560 | scnprintf(buf, size, "%s", emsg); | |
1561 | break; | |
1562 | } | |
1563 | ||
1564 | return 0; | |
1565 | } | |
a025e4f0 | 1566 | |
956fa571 ACM |
1567 | int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size) |
1568 | { | |
1569 | char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf)); | |
e965bea1 | 1570 | int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0; |
956fa571 ACM |
1571 | |
1572 | switch (err) { | |
1573 | case EPERM: | |
e5d4a290 | 1574 | sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user); |
e965bea1 ACM |
1575 | printed += scnprintf(buf + printed, size - printed, |
1576 | "Error:\t%s.\n" | |
956fa571 | 1577 | "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n" |
e965bea1 | 1578 | "Hint:\tTried using %zd kB.\n", |
e5d4a290 | 1579 | emsg, pages_max_per_user, pages_attempted); |
e965bea1 ACM |
1580 | |
1581 | if (pages_attempted >= pages_max_per_user) { | |
1582 | printed += scnprintf(buf + printed, size - printed, | |
1583 | "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n", | |
1584 | pages_max_per_user + pages_attempted); | |
1585 | } | |
1586 | ||
1587 | printed += scnprintf(buf + printed, size - printed, | |
1588 | "Hint:\tTry using a smaller -m/--mmap-pages value."); | |
956fa571 ACM |
1589 | break; |
1590 | default: | |
1591 | scnprintf(buf, size, "%s", emsg); | |
1592 | break; | |
1593 | } | |
1594 | ||
1595 | return 0; | |
1596 | } | |
1597 | ||
a025e4f0 AH |
1598 | void perf_evlist__to_front(struct perf_evlist *evlist, |
1599 | struct perf_evsel *move_evsel) | |
1600 | { | |
1601 | struct perf_evsel *evsel, *n; | |
1602 | LIST_HEAD(move); | |
1603 | ||
1604 | if (move_evsel == perf_evlist__first(evlist)) | |
1605 | return; | |
1606 | ||
0050f7aa | 1607 | evlist__for_each_safe(evlist, n, evsel) { |
a025e4f0 AH |
1608 | if (evsel->leader == move_evsel->leader) |
1609 | list_move_tail(&evsel->node, &move); | |
1610 | } | |
1611 | ||
1612 | list_splice(&move, &evlist->entries); | |
1613 | } | |
60b0896c AH |
1614 | |
1615 | void perf_evlist__set_tracking_event(struct perf_evlist *evlist, | |
1616 | struct perf_evsel *tracking_evsel) | |
1617 | { | |
1618 | struct perf_evsel *evsel; | |
1619 | ||
1620 | if (tracking_evsel->tracking) | |
1621 | return; | |
1622 | ||
1623 | evlist__for_each(evlist, evsel) { | |
1624 | if (evsel != tracking_evsel) | |
1625 | evsel->tracking = false; | |
1626 | } | |
1627 | ||
1628 | tracking_evsel->tracking = true; | |
1629 | } |