/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include "parse-options.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

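/*
 * e->fd and e->sample_id are xyarray tables indexed by (cpu, thread):
 * FD() yields the file descriptor perf_event_open() returned for that
 * pair, SID() the matching struct perf_sample_id slot.
 */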
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

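/*
 * A minimal usage sketch (illustrative, not part of this file):
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	...
 *	perf_evlist__delete(evlist);
 *
 * perf_evlist__delete() also deletes each evsel on the list, here the
 * default "cycles" event added by perf_evlist__add_default().
 */
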
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

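/*
 * E.g. for a three-event list, after perf_evlist__set_leader():
 *
 *	entries: cycles -> instructions -> branches
 *
 *	cycles->leader == instructions->leader == branches->leader == cycles
 *	cycles->nr_members == 3, evlist->nr_groups == 1
 */
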
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (evsel == NULL)
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

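/*
 * Usage sketch (hypothetical caller and handler names):
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   process_sched_switch))
 *		pr_err("cannot add sched:sched_switch tracepoint\n");
 *
 * The tracepoint must be visible in debugfs for perf_evsel__newtp()
 * to succeed.
 */
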
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;

	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

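/*
 * Callers typically block on the descriptors gathered above with
 * something like (a sketch, not code from this file):
 *
 *	poll(evlist->pollfd, evlist->nr_fds, timeout_ms);
 *
 * and then drain the ring buffers via perf_evlist__mmap_read().
 */
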
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

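/*
 * Layout the legacy path above indexes into: with, e.g.,
 * read_format = PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED,
 * the kernel fills the read() buffer as:
 *
 *	read_data[0] = counter value
 *	read_data[1] = time_enabled
 *	read_data[2] = id		(id_idx ends up at 2)
 */
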
struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	return event;
}

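/*
 * A minimal consumer loop over all ring buffers (a sketch assuming the
 * evlist was already opened and mmap'ed):
 *
 *	union perf_event *event;
 *	int i;
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++) {
 *		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
 *			... process the event ...
 *			perf_evlist__mmap_consume(evlist, i);
 *		}
 *	}
 */
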
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	if (!evlist->overwrite) {
		struct perf_mmap *md = &evlist->mmap[idx];
		unsigned int old = md->prev;

		perf_mmap__write_tail(md, old);
	}
}

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	zfree(&evlist->mmap);
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       int prot, int mask, int cpu, int thread,
				       int *output)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		int fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;
			if (__perf_evlist__mmap(evlist, idx, prot, mask,
						*output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;
		}

		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
			return -1;
	}

	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
				     int mask)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask,
							cpu, thread, &output))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
					int mask)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0,
						thread, &output))
			goto out_unmap;
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

static size_t perf_evlist__mmap_size(unsigned long pages)
{
	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

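/*
 * E.g. with 4 kiB pages the UINT_MAX default becomes 128 data pages,
 * so the map length is (128 + 1) * 4096 = 528384 bytes; the extra page
 * holds the kernel's struct perf_event_mmap_page header.
 */
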
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag = 'B', .mult = 1 },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		/* round pages up to next power of 2 */
		pages = next_pow2_l(pages);
		if (!pages)
			return -EINVAL;
		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
			pages * page_size, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	unsigned int *mmap_pages = opt->value;
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

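/*
 * E.g. "-m 512K" yields 128 pages on a 4 kiB-page system, and "-m 100"
 * is rounded up to the next power of two, 128 pages, with a pr_info
 * notice from parse_pages_arg().
 */
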
/**
 * perf_evlist__mmap - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mask = evlist->mmap_len - page_size - 1;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

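/*
 * Putting the pieces together, a typical caller (a sketch modelled on
 * the record/top tools) goes through:
 *
 *	perf_evlist__create_maps(evlist, &target);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, opts->mmap_pages, false);
 *	perf_evlist__enable(evlist);
 *	... poll() + perf_evlist__mmap_read() loop ...
 */
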
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (target__uses_dummy_map(target))
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	list_for_each_entry(evsel, &evlist->entries, node)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__update_id_pos(evlist);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

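/*
 * Workload control sketch: perf_evlist__prepare_workload() forks the
 * child and leaves it blocked on the "go" pipe; once the events are set
 * up, perf_evlist__start_workload() pops the cork, e.g.:
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, true);
 *	perf_evlist__open(evlist);
 *	perf_evlist__start_workload(evlist);
 */
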
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}

int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
			     int err, char *buf, size_t size)
{
	char sbuf[128];

	switch (err) {
	case ENOENT:
		scnprintf(buf, size, "%s",
			  "Error:\tUnable to find debugfs\n"
			  "Hint:\tWas your kernel compiled with debugfs support?\n"
			  "Hint:\tIs the debugfs filesystem mounted?\n"
			  "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
		break;
	case EACCES:
		scnprintf(buf, size,
			  "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
			  "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
			  debugfs_mountpoint, debugfs_mountpoint);
		break;
	default:
		scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
		break;
	}

	return 0;
}

int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1");

		printed += scnprintf(buf + printed, size - printed,
				     ".\nHint:\tThe current value is %d.", value);
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void perf_evlist__to_front(struct perf_evlist *evlist,
			   struct perf_evsel *move_evsel)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == perf_evlist__first(evlist))
		return;

	list_for_each_entry_safe(evsel, n, &evlist->entries, node) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->node, &move);
	}

	list_splice(&move, &evlist->entries);
}