/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include "parse-options.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
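
/*
 * FD() and SID() index the per-event file-descriptor and sample-id
 * xyarrays by (cpu, thread) pair; each evsel carries one entry per
 * open counter instance.
 */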

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}
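
/*
 * A rough lifecycle sketch (error handling elided; target setup belongs
 * to the calling tool, and UINT_MAX picks the default mmap size):
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	perf_evlist__add_default(evlist);
 *	perf_evlist__create_maps(evlist, &target);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, UINT_MAX, false);
 */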

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}
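
/*
 * nr_members is computed from the evsel idx range (last->idx -
 * leader->idx + 1), so __perf_evlist__set_leader() expects the list to
 * hold contiguously indexed events.
 */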

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}
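
/*
 * add_pollfd() assumes alloc_pollfd() already sized the array for
 * nr_cpus * nr_threads * nr_entries descriptors; there is no bounds
 * check here, so callers must not add more fds than that.
 */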

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
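
/*
 * On the legacy path above, the read() buffer is laid out per
 * read_format: the counter value first, then the optional TIME_ENABLED
 * and TIME_RUNNING fields, then the id -- which is why id_idx starts at
 * 1 and is bumped once for each optional field that is present.
 */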

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}
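
/*
 * id_pos counts u64 words forward from the start of a
 * PERF_RECORD_SAMPLE body, while is_pos counts back from the end of a
 * non-sample event's sample_id_all trailer; event2id() above picks
 * whichever applies to the record type at hand.
 */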

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
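
/*
 * A rough consumer-loop sketch (not a definitive API contract):
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		... handle event ...
 *	}
 *
 * In non-overwrite mode the tail is advanced on each call, letting the
 * kernel reuse the consumed space; in overwrite mode the reader may be
 * lapped, which the half-buffer check above detects and recovers from.
 */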

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

static size_t perf_evlist__mmap_size(unsigned long pages)
{
	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	unsigned int pages, *mmap_pages = opt->value;
	size_t size;
	char *eptr;

	pages = strtoul(str, &eptr, 10);
	if (*eptr != '\0') {
		pr_err("failed to parse --mmap_pages/-m value\n");
		return -1;
	}

	size = perf_evlist__mmap_size(pages);
	if (!size) {
		pr_err("--mmap_pages/-m value must be a power of two.");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}
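
/*
 * Worked example: with 4 kiB pages, "-m 128" maps (128 + 1) * 4096 =
 * 528384 bytes per ring buffer. The extra page is the control page
 * holding the head/tail pointers; the data area proper stays a
 * power-of-two 512 kiB, so the "offset & mask" indexing in
 * perf_evlist__mmap_read() works.
 */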

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 * struct perf_mmap *m = &evlist->mmap[cpu];
 * unsigned int head = perf_mmap__read_head(m);
 *
 * perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	mask = evlist->mmap_len - page_size - 1;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	list_for_each_entry(evsel, &evlist->entries, node)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__update_id_pos(evlist);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
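
/*
 * The "cork" works because the forked child blocks in read(go_pipe[0])
 * until perf_evlist__start_workload() below writes one byte to cork_fd,
 * and FD_CLOEXEC on go_pipe[0] makes that descriptor vanish once
 * execvp() replaces the child image.
 */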

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}