/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}
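
/*
 * Illustrative lifecycle sketch (error handling elided; passing NULL maps
 * here and filling them in later via perf_evlist__create_maps is an
 * assumption about the caller, not a requirement):
 *
 *	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
 *
 *	if (evlist != NULL) {
 *		... add evsels, open, mmap, consume events ...
 *		perf_evlist__delete(evlist);
 *	}
 */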

void perf_evlist__config_attrs(struct perf_evlist *evlist,
			       struct perf_record_opts *opts)
{
	struct perf_evsel *evsel, *first;

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	first = perf_evlist__first(evlist);

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts, first);

		if (evlist->nr_entries > 1)
			evsel->attr.sample_type |= PERF_SAMPLE_ID;
	}
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	leader->leader = NULL;

	list_for_each_entry(evsel, list, node) {
		if (evsel != leader)
			evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries)
		__perf_evlist__set_leader(&evlist->entries);
}
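
/*
 * Illustrative: after perf_evlist__set_leader() the first entry in the list
 * leads the event group and is the only one with a NULL ->leader:
 *
 *	perf_evlist__set_leader(evlist);
 *	leader = perf_evlist__first(evlist);	leader->leader == NULL,
 *						all other evsel->leader == leader
 */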

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

int perf_evlist__add_attrs(struct perf_evlist *evlist,
			   struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

static int trace_event__id(const char *evname)
{
	char *filename, *colon;
	int err = -1, fd;

	if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
		return -1;

	colon = strrchr(filename, ':');
	if (colon != NULL)
		*colon = '/';

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		if (read(fd, id, sizeof(id)) > 0)
			err = atoi(id);
		close(fd);
	}

	free(filename);
	return err;
}
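
/*
 * Illustrative: for evname "sched:sched_switch" the ':' is rewritten to '/',
 * so the id is read from a path such as
 * <tracing_events_path>/sched/sched_switch/id, typically under
 * /sys/kernel/debug/tracing/events.
 */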

int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
				 const char *tracepoints[],
				 size_t nr_tracepoints)
{
	int err;
	size_t i;
	struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));

	if (attrs == NULL)
		return -1;

	for (i = 0; i < nr_tracepoints; i++) {
		err = trace_event__id(tracepoints[i]);

		if (err < 0)
			goto out_free_attrs;

		attrs[i].type = PERF_TYPE_TRACEPOINT;
		attrs[i].config = err;
		attrs[i].sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
		attrs[i].sample_period = 1;
	}

	err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
out_free_attrs:
	free(attrs);
	return err;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
					  const struct perf_evsel_str_handler *assocs,
					  size_t nr_assocs)
{
	struct perf_evsel *evsel;
	int err;
	size_t i;

	for (i = 0; i < nr_assocs; i++) {
		err = trace_event__id(assocs[i].name);
		if (err < 0)
			goto out;

		evsel = perf_evlist__find_tracepoint_by_id(evlist, err);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out;
		evsel->handler.func = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
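
/*
 * Minimal usage sketch (the handler name is illustrative; it must match
 * whatever signature the tool expects behind evsel->handler.func):
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   process_sched_switch) < 0)
 *		return -1;
 */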

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries;

	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}
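
/*
 * Illustrative: mapping a sample back to the evsel that produced it, given
 * a parsed perf_sample:
 *
 *	struct perf_evsel *evsel = perf_evlist__id2evsel(evlist, sample.id);
 *
 *	if (evsel == NULL)
 *		... unknown id, drop the event ...
 */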

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
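
/*
 * Illustrative consumer loop over one mmap ring; with overwrite == false
 * the tail is written back for us on each call:
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		... process event ...
 *	}
 */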

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__all(evlist->cpus))
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__all(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = evlist->threads->nr;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = evlist->threads->nr;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}
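
/*
 * Illustrative: applying one tracing filter expression to every evsel in
 * the list (the expression below assumes a sched tracepoint is present):
 *
 *	if (perf_evlist__set_filter(evlist, "prev_pid != 0") < 0)
 *		...
 */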

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_type;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err, ncpus, nthreads;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
	nthreads = evlist->threads ? evlist->threads->nr : 1;

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);

	errno = -err;
	return err;
}
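
/*
 * Sketch of the usual call order around perf_evlist__open() (error handling
 * elided; on failure it has already closed whatever it managed to open and
 * set errno accordingly):
 *
 *	perf_evlist__set_leader(evlist);
 *	if (perf_evlist__open(evlist) < 0)
 *		... report strerror(errno) and bail out ...
 *	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0)
 *		...
 */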

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_record_opts *opts,
				  const char *argv[])
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (opts->pipe_output)
			dup2(2, 1);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go.
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(&opts->target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		/*
		 * Remove the cork, let it rip!
		 */
		return close(evlist->workload.cork_fd);
	}

	return 0;
}
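
/*
 * Illustrative pairing with perf_evlist__prepare_workload(): the workload is
 * forked first but stays corked on a pipe, counters are set up, then the
 * cork is removed:
 *
 *	perf_evlist__prepare_workload(evlist, opts, argv);
 *	... perf_evlist__open(), perf_evlist__mmap(), enable ...
 *	perf_evlist__start_workload(evlist);
 */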

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__first(evlist);
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}