/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include "parse-options.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}
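/*
 * Illustrative usage sketch (not part of the original file): a typical
 * caller builds an evlist with the default cycles event and tears it
 * down again with perf_evlist__delete() once it is done:
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	...
 *	perf_evlist__delete(evlist);
 */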
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}
void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}
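/*
 * Worked example (illustrative, not part of the original file): for a
 * parsed group whose evsels carry idx 2, 3 and 4, the first entry becomes
 * the leader and leader->nr_members = 4 - 2 + 1 = 3, i.e. the group size
 * including the leader itself.
 */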
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}
int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
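/*
 * Illustrative sketch (not part of the original file): a tool tracing
 * context switches could hook up a tracepoint like this, where
 * process_switch is a hypothetical handler supplied by the caller:
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   process_switch) < 0)
 *		return -1;
 */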
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}
int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}
static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	return event;
}
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	if (!evlist->overwrite) {
		struct perf_mmap *md = &evlist->mmap[idx];
		unsigned int old = md->prev;

		perf_mmap__write_tail(md, old);
	}
}

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}
static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
					int prot, int mask, int cpu, int thread,
					int *output)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		int fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;
			if (__perf_evlist__mmap(evlist, idx, prot, mask,
						*output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;
		}

		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
			return -1;
	}

	return 0;
}
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
				     int mask)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask,
							cpu, thread, &output))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
					int mask)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0,
						thread, &output))
			goto out_unmap;
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}
static size_t perf_evlist__mmap_size(unsigned long pages)
{
	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}
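/*
 * Worked example (illustrative, not part of the original file): with the
 * default of UINT_MAX and a 4096 byte page size, pages becomes
 * (512 * 1024) / 4096 = 128, so the mapping is (128 + 1) * 4096 = 528384
 * bytes: 128 data pages plus one extra page for the ring buffer header.
 */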
int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	unsigned int *mmap_pages = opt->value;
	unsigned long pages, val;
	size_t size;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
		if (pages < (1UL << 31) && !is_power_of_2(pages)) {
			pages = next_pow2(pages);
			pr_info("rounding mmap pages size to %lu (%lu pages)\n",
				pages * page_size, pages);
		}
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0') {
			pr_err("failed to parse --mmap_pages/-m value\n");
			return -1;
		}
	}

	if (pages > UINT_MAX || pages > SIZE_MAX / page_size) {
		pr_err("--mmap_pages/-m value too big\n");
		return -1;
	}

	size = perf_evlist__mmap_size(pages);
	if (!size) {
		pr_err("--mmap_pages/-m value must be a power of two.");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}
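/*
 * Illustrative examples (not part of the original file): "-m 512K" goes
 * through parse_tag_value() as a size and yields 524288 / 4096 = 128 pages
 * on a 4 KiB page system, already a power of two; "-m 100" is taken as a
 * raw page count and is later rejected because perf_evlist__mmap_size(100)
 * returns 0 for a non power of two value.
 */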
/**
 * perf_evlist__mmap - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mask = evlist->mmap_len - page_size - 1;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
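/*
 * Illustrative usage sketch (not part of the original file): a record-style
 * caller typically wires these helpers together as follows (error handling
 * trimmed, UINT_MAX selects the default mmap size):
 *
 *	perf_evlist__create_maps(evlist, &target);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, UINT_MAX, false);
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++) {
 *		union perf_event *event;
 *
 *		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
 *			... handle the event ...
 *			perf_evlist__mmap_consume(evlist, i);
 *		}
 *	}
 */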
int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}
int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	list_for_each_entry(evsel, &evlist->entries, node)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}
bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}
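/*
 * Worked example (illustrative, not part of the original file): with
 * sample_id_all set and a sample_type of PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
 * PERF_SAMPLE_ID, the trailer on non-sample records is
 * 2 * sizeof(u32) + sizeof(u64) + sizeof(u64) = 24 bytes, assuming the
 * u32 tid/pid and u64 time/id fields of struct perf_sample.
 */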
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__update_id_pos(evlist);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
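/*
 * Illustrative sketch (not part of the original file): a tool profiling a
 * forked workload prepares it first, finishes arming the counters, and only
 * then pops the cork so the exec happens with events already enabled
 * (mmap_pages here is a placeholder for the caller's setting):
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, true);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, mmap_pages, false);
 *	perf_evlist__start_workload(evlist);
 */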
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}
int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
			     int err, char *buf, size_t size)
{
	char sbuf[128];

	switch (err) {
	case ENOENT:
		scnprintf(buf, size, "%s",
			  "Error:\tUnable to find debugfs\n"
			  "Hint:\tWas your kernel compiled with debugfs support?\n"
			  "Hint:\tIs the debugfs filesystem mounted?\n"
			  "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
		break;
	case EACCES:
		scnprintf(buf, size,
			  "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
			  "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
			  debugfs_mountpoint, debugfs_mountpoint);
		break;
	default:
		scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
		break;
	}

	return 0;
}
int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		if (filename__read_int("/proc/sys/kernel/perf_event_paranoid", &value))
			break;

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1");

		printed += scnprintf(buf + printed, size - printed,
				     ".\nHint:\tThe current value is %d.", value);
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}