/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include "parse-options.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
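
/*
 * Clarifying comment (added): FD() dereferences the perf event file
 * descriptor stored for (cpu index x, thread index y) in the evsel's
 * fd xyarray; SID() returns the matching struct perf_sample_id slot.
 */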

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}
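
/*
 * Illustrative usage (added sketch): perf_evlist__new_default() yields a
 * list with a single "cycles" event already added:
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	... open, mmap and consume events ...
 *	perf_evlist__delete(evlist);
 */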

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}
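
/*
 * Clarifying note (added): the evsels from list->next through list->prev
 * form a single event group; the first entry becomes the group leader,
 * and nr_members counts the leader itself plus its members.
 */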

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	__evlist__for_each_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (evsel == NULL)
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
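
/*
 * Illustrative sketch (added): registering a tracepoint with a handler;
 * "sched", "sched_switch" and process_switch() are hypothetical examples,
 * not part of this file.
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   process_switch) < 0)
 *		return -1;
 */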

static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		evlist__for_each(evlist, pos) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			nr_threads = perf_evlist__nr_threads(evlist, pos);
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		evlist__for_each(evlist, pos) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			nr_threads = perf_evlist__nr_threads(evlist, pos);
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}
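
/*
 * Clarifying note (added): only group leaders are ioctl'ed above because
 * enabling or disabling a group leader acts on the entire group, so
 * touching the members individually would be redundant.
 */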

int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		for (thread = 0; thread < nr_threads; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		for (thread = 0; thread < nr_threads; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread, err;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		err = ioctl(FD(evsel, cpu, thread),
			    PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

static int perf_evlist__grow_pollfd(struct perf_evlist *evlist, int hint)
{
	int nr_fds_alloc = evlist->nr_fds_alloc + hint;
	size_t size = sizeof(struct pollfd) * nr_fds_alloc;
	struct pollfd *pollfd = realloc(evlist->pollfd, size);

	if (pollfd == NULL)
		return -ENOMEM;

	evlist->nr_fds_alloc = nr_fds_alloc;
	evlist->pollfd	     = pollfd;
	return 0;
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (evlist->nr_fds_alloc - evlist->nr_fds < nfds &&
	    perf_evlist__grow_pollfd(evlist, nfds) < 0)
		return -ENOMEM;

	return 0;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	/*
	 * XXX: 64 is arbitrary, just to avoid calling realloc for every fd.
	 *	Find a better autogrowing heuristic.
	 */
	if (evlist->nr_fds == evlist->nr_fds_alloc &&
	    perf_evlist__grow_pollfd(evlist, 64) < 0)
		return -ENOMEM;

	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN | POLLERR | POLLHUP;
	evlist->nr_fds++;
	return 0;
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	int fd, nr_fds = 0;

	if (evlist->nr_fds == 0)
		return 0;

	for (fd = 0; fd < evlist->nr_fds; ++fd) {
		if (evlist->pollfd[fd].revents & revents_and_mask)
			continue;

		if (fd != nr_fds)
			evlist->pollfd[nr_fds] = evlist->pollfd[fd];

		++nr_fds;
	}

	evlist->nr_fds = nr_fds;
	return nr_fds;
}
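
/*
 * Usage note (added): typically called after poll() to drop descriptors
 * whose revents match the mask, e.g.:
 *
 *	if (perf_evlist__filter_pollfd(evlist, POLLHUP | POLLERR) == 0)
 *		done = true;
 *
 * The return value is the number of descriptors still being monitored.
 */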

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	return event;
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	if (!evlist->overwrite) {
		struct perf_mmap *md = &evlist->mmap[idx];
		unsigned int old = md->prev;

		perf_mmap__write_tail(md, old);
	}
}
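
/*
 * Illustrative consumer loop (added sketch; process_event() is a
 * hypothetical callback, not part of this file):
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		process_event(event);
 *		perf_evlist__mmap_consume(evlist, idx);
 *	}
 */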

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap == NULL)
		return;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	zfree(&evlist->mmap);
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

struct mmap_params {
	int prot;
	int mask;
};

static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
			       struct mmap_params *mp, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mp->mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	return 0;
}

static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu,
				       int thread, int *output)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		int fd;

		if (evsel->system_wide && thread)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;
			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;
		}

		if (perf_evlist__add_pollfd(evlist, fd) < 0)
			return -1;

		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
			return -1;
	}

	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output))
			goto out_unmap;
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

static size_t perf_evlist__mmap_size(unsigned long pages)
{
	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}
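
/*
 * Clarifying note (added): the "+ 1" is for the leading control page
 * (struct perf_event_mmap_page) that precedes the data pages; e.g.
 * pages = 128 with 4 KiB pages maps 129 * 4096 = 528384 bytes.
 */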

static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		/* round pages up to next power of 2 */
		pages = next_pow2_l(pages);
		if (!pages)
			return -EINVAL;
		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
			pages * page_size, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}
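
/*
 * Illustrative examples (added, assuming 4 KiB pages): "512K" parses as
 * a size and yields 128 pages; "100" parses as a page count and is
 * rounded up to the next power of two, 128 pages.
 */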

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	unsigned int *mmap_pages = opt->value;
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

/**
 * perf_evlist__mmap - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	struct mmap_params mp = {
		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
	};

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mp.mask = evlist->mmap_len - page_size - 1;

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, &mp);

	return perf_evlist__mmap_per_cpu(evlist, &mp);
}
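
/*
 * Illustrative call sequence (added sketch; error handling elided and
 * "opts" is a hypothetical options struct, not part of this file):
 *
 *	perf_evlist__create_maps(evlist, &opts->target);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, opts->mmap_pages, false);
 *	perf_evlist__enable(evlist);
 *	... poll(evlist->pollfd, evlist->nr_fds, timeout), then drain each
 *	mmap with perf_evlist__mmap_read()/perf_evlist__mmap_consume() ...
 */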

int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (target__uses_dummy_map(target))
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each(evlist, evsel)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	evlist__for_each(evlist, pos) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	evlist__for_each_continue(evlist, pos) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);
	int n;

	evlist__for_each_reverse(evlist, evsel) {
		n = evsel->cpus ? evsel->cpus->nr : ncpus;
		perf_evsel__close(evsel, n, nthreads);
	}
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__update_id_pos(evlist);

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
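
/*
 * Illustrative pairing (added sketch): the workload is forked stopped on
 * the "go" pipe, counters are set up while it waits, then it is released
 * so no startup events are lost:
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	... perf_evlist__open(), perf_evlist__mmap(), perf_evlist__enable() ...
 *	perf_evlist__start_workload(evlist);
 */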

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	evlist__for_each(evlist, evsel) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}

int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
			     int err, char *buf, size_t size)
{
	char sbuf[128];

	switch (err) {
	case ENOENT:
		scnprintf(buf, size, "%s",
			  "Error:\tUnable to find debugfs\n"
			  "Hint:\tWas your kernel compiled with debugfs support?\n"
			  "Hint:\tIs the debugfs filesystem mounted?\n"
			  "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
		break;
	case EACCES:
		scnprintf(buf, size,
			  "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
			  "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
			  debugfs_mountpoint, debugfs_mountpoint);
		break;
	default:
		scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
		break;
	}

	return 0;
}

int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				     "Hint:\tThe current value is %d.", value);
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void perf_evlist__to_front(struct perf_evlist *evlist,
			   struct perf_evsel *move_evsel)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == perf_evlist__first(evlist))
		return;

	evlist__for_each_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->node, &move);
	}

	list_splice(&move, &evlist->entries);
}

void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
				     struct perf_evsel *tracking_evsel)
{
	struct perf_evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}