/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include "parse-options.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
                       struct thread_map *threads)
{
        int i;

        for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
                INIT_HLIST_HEAD(&evlist->heads[i]);
        INIT_LIST_HEAD(&evlist->entries);
        perf_evlist__set_maps(evlist, cpus, threads);
        evlist->workload.pid = -1;
}
struct perf_evlist *perf_evlist__new(void)
{
        struct perf_evlist *evlist = zalloc(sizeof(*evlist));

        if (evlist != NULL)
                perf_evlist__init(evlist, NULL, NULL);

        return evlist;
}
struct perf_evlist *perf_evlist__new_default(void)
{
        struct perf_evlist *evlist = perf_evlist__new();

        if (evlist && perf_evlist__add_default(evlist)) {
                perf_evlist__delete(evlist);
                evlist = NULL;
        }

        return evlist;
}
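/*
 * Typical lifecycle of an evlist, as the callers in builtin-*.c use it
 * (a sketch, with error handling trimmed; not a function in this file):
 *
 *      struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *      if (evlist == NULL)
 *              return -ENOMEM;
 *      if (perf_evlist__open(evlist) < 0)
 *              goto out_delete;
 *      ...consume events...
 *      perf_evlist__close(evlist);
 * out_delete:
 *      perf_evlist__delete(evlist);
 */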
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);

        evlist->id_pos = first->id_pos;
        evlist->is_pos = first->is_pos;
}
static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node)
                perf_evsel__calc_id_pos(evsel);

        perf_evlist__set_id_pos(evlist);
}
static void perf_evlist__purge(struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *n;

        list_for_each_entry_safe(pos, n, &evlist->entries, node) {
                list_del_init(&pos->node);
                perf_evsel__delete(pos);
        }

        evlist->nr_entries = 0;
}
void perf_evlist__exit(struct perf_evlist *evlist)
{
        free(evlist->mmap);
        free(evlist->pollfd);
        evlist->mmap = NULL;
        evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
        perf_evlist__purge(evlist);
        perf_evlist__exit(evlist);
        free(evlist);
}
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
        list_add_tail(&entry->node, &evlist->entries);
        entry->idx = evlist->nr_entries;

        if (!evlist->nr_entries++)
                perf_evlist__set_id_pos(evlist);
}
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
                                   struct list_head *list,
                                   int nr_entries)
{
        bool set_id_pos = !evlist->nr_entries;

        list_splice_tail(list, &evlist->entries);
        evlist->nr_entries += nr_entries;
        if (set_id_pos)
                perf_evlist__set_id_pos(evlist);
}
void __perf_evlist__set_leader(struct list_head *list)
{
        struct perf_evsel *evsel, *leader;

        leader = list_entry(list->next, struct perf_evsel, node);
        evsel = list_entry(list->prev, struct perf_evsel, node);

        leader->nr_members = evsel->idx - leader->idx + 1;

        list_for_each_entry(evsel, list, node) {
                evsel->leader = leader;
        }
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
        if (evlist->nr_entries) {
                evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
                __perf_evlist__set_leader(&evlist->entries);
        }
}
int perf_evlist__add_default(struct perf_evlist *evlist)
{
        struct perf_event_attr attr = {
                .type   = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };
        struct perf_evsel *evsel;

        event_attr_init(&attr);

        evsel = perf_evsel__new(&attr);
        if (evsel == NULL)
                goto error;

        /* use strdup() because free(evsel) assumes name is allocated */
        evsel->name = strdup("cycles");
        if (!evsel->name)
                goto error_free;

        perf_evlist__add(evlist, evsel);
        return 0;
error_free:
        perf_evsel__delete(evsel);
error:
        return -ENOMEM;
}
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
                                  struct perf_event_attr *attrs, size_t nr_attrs)
{
        struct perf_evsel *evsel, *n;
        LIST_HEAD(head);
        size_t i;

        for (i = 0; i < nr_attrs; i++) {
                evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
                if (evsel == NULL)
                        goto out_delete_partial_list;
                list_add_tail(&evsel->node, &head);
        }

        perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

        return 0;

out_delete_partial_list:
        list_for_each_entry_safe(evsel, n, &head, node)
                perf_evsel__delete(evsel);
        return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
                                     struct perf_event_attr *attrs, size_t nr_attrs)
{
        size_t i;

        for (i = 0; i < nr_attrs; i++)
                event_attr_init(attrs + i);

        return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
                    (int)evsel->attr.config == id)
                        return evsel;
        }

        return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
                                     const char *name)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
                    (strcmp(evsel->name, name) == 0))
                        return evsel;
        }

        return NULL;
}
int perf_evlist__add_newtp(struct perf_evlist *evlist,
                           const char *sys, const char *name, void *handler)
{
        struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

        if (evsel == NULL)
                return -1;

        evsel->handler = handler;
        perf_evlist__add(evlist, evsel);
        return 0;
}
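/*
 * Example (sketch): a tool wiring up a tracepoint with its handler, in
 * the style of builtin-trace; "handler_fn" stands for the caller's
 * callback, which is stashed in evsel->handler as an opaque pointer and
 * interpreted by the tool, not by the evlist code:
 *
 *      if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter",
 *                                 handler_fn) < 0)
 *              goto out_error;
 */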
void perf_evlist__disable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        if (!perf_evsel__is_group_leader(pos) || !pos->fd)
                                continue;
                        for (thread = 0; thread < nr_threads; thread++)
                                ioctl(FD(pos, cpu, thread),
                                      PERF_EVENT_IOC_DISABLE, 0);
                }
        }
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        if (!perf_evsel__is_group_leader(pos) || !pos->fd)
                                continue;
                        for (thread = 0; thread < nr_threads; thread++)
                                ioctl(FD(pos, cpu, thread),
                                      PERF_EVENT_IOC_ENABLE, 0);
                }
        }
}
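/*
 * Note that only group leaders are toggled above: group members are
 * only scheduled onto the PMU while their leader is, so flipping the
 * leader per cpu/thread is enough to stop or start the whole group.
 */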
int perf_evlist__disable_event(struct perf_evlist *evlist,
                               struct perf_evsel *evsel)
{
        int cpu, thread, err;

        if (!evsel->fd)
                return 0;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                for (thread = 0; thread < evlist->threads->nr; thread++) {
                        err = ioctl(FD(evsel, cpu, thread),
                                    PERF_EVENT_IOC_DISABLE, 0);
                        if (err)
                                return err;
                }
        }
        return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
                              struct perf_evsel *evsel)
{
        int cpu, thread, err;

        if (!evsel->fd)
                return -EINVAL;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                for (thread = 0; thread < evlist->threads->nr; thread++) {
                        err = ioctl(FD(evsel, cpu, thread),
                                    PERF_EVENT_IOC_ENABLE, 0);
                        if (err)
                                return err;
                }
        }
        return 0;
}
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);
        int nfds = nr_cpus * nr_threads * evlist->nr_entries;

        evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
        return evlist->pollfd != NULL ? 0 : -ENOMEM;
}
void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
        fcntl(fd, F_SETFL, O_NONBLOCK);
        evlist->pollfd[evlist->nr_fds].fd = fd;
        evlist->pollfd[evlist->nr_fds].events = POLLIN;
        evlist->nr_fds++;
}
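/*
 * The descriptors gathered here back the tools' event loops, which
 * block in plain poll(2) between ring buffer drains, e.g. (sketch):
 *
 *      while (!done) {
 *              ...drain the mmaps with perf_evlist__mmap_read()...
 *              poll(evlist->pollfd, evlist->nr_fds, -1);
 *      }
 */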
static void perf_evlist__id_hash(struct perf_evlist *evlist,
                                 struct perf_evsel *evsel,
                                 int cpu, int thread, u64 id)
{
        int hash;
        struct perf_sample_id *sid = SID(evsel, cpu, thread);

        sid->id = id;
        sid->evsel = evsel;
        hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
        hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
                         int cpu, int thread, u64 id)
{
        perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
        evsel->id[evsel->ids++] = id;
}
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
                                  struct perf_evsel *evsel,
                                  int cpu, int thread, int fd)
{
        u64 read_data[4] = { 0, };
        int id_idx = 1; /* The first entry is the counter value */
        u64 id;
        int ret;

        ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
        if (!ret)
                goto add;

        if (errno != ENOTTY)
                return -1;

        /* Legacy way to get event id.. All hail to old kernels! */

        /*
         * This way does not work with group format read, so bail
         * out in that case.
         */
        if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
                return -1;

        if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
            read(fd, &read_data, sizeof(read_data)) == -1)
                return -1;

        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                ++id_idx;
        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                ++id_idx;

        id = read_data[id_idx];

add:
        perf_evlist__id_add(evlist, evsel, cpu, thread, id);
        return 0;
}
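/*
 * The legacy fallback above leans on the layout of a PERF_FORMAT_ID
 * read(): { value, TOTAL_TIME_ENABLED?, TOTAL_TIME_RUNNING?, id }.
 * id_idx starts at 1 and is bumped once per time field present, so
 * with, say, PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED the id
 * sits in read_data[2].
 */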
struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
        struct hlist_head *head;
        struct perf_sample_id *sid;
        int hash;

        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &evlist->heads[hash];

        hlist_for_each_entry(sid, head, node)
                if (sid->id == id)
                        return sid;

        return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
        struct perf_sample_id *sid;

        if (evlist->nr_entries == 1)
                return perf_evlist__first(evlist);

        sid = perf_evlist__id2sid(evlist, id);
        if (sid)
                return sid->evsel;

        if (!perf_evlist__sample_id_all(evlist))
                return perf_evlist__first(evlist);

        return NULL;
}
static int perf_evlist__event2id(struct perf_evlist *evlist,
                                 union perf_event *event, u64 *id)
{
        const u64 *array = event->sample.array;
        ssize_t n;

        n = (event->header.size - sizeof(event->header)) >> 3;

        if (event->header.type == PERF_RECORD_SAMPLE) {
                if (evlist->id_pos >= n)
                        return -1;
                *id = array[evlist->id_pos];
        } else {
                if (evlist->is_pos > n)
                        return -1;
                n -= evlist->is_pos;
                *id = array[n];
        }
        return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
                                                   union perf_event *event)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        struct hlist_head *head;
        struct perf_sample_id *sid;
        int hash;
        u64 id;

        if (evlist->nr_entries == 1)
                return first;

        if (!first->attr.sample_id_all &&
            event->header.type != PERF_RECORD_SAMPLE)
                return first;

        if (perf_evlist__event2id(evlist, event, &id))
                return NULL;

        /* Synthesized events have an id of zero */
        if (!id)
                return first;

        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &evlist->heads[hash];

        hlist_for_each_entry(sid, head, node) {
                if (sid->id == id)
                        return sid->evsel;
        }
        return NULL;
}
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
        struct perf_mmap *md = &evlist->mmap[idx];
        unsigned int head = perf_mmap__read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
        union perf_event *event = NULL;

        if (evlist->overwrite) {
                /*
                 * If we're further behind than half the buffer, there's a chance
                 * the writer will bite our tail and mess up the samples under us.
                 *
                 * If we somehow ended up ahead of the head, we got messed up.
                 *
                 * In either case, truncate and restart at head.
                 */
                int diff = head - old;
                if (diff > md->mask / 2 || diff < 0) {
                        fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

                        /*
                         * head points to a known good entry, start there.
                         */
                        old = head;
                }
        }

        if (old != head) {
                size_t size;

                event = (union perf_event *)&data[old & md->mask];
                size = event->header.size;

                /*
                 * Event straddles the mmap boundary -- header should always
                 * be inside due to u64 alignment of output.
                 */
                if ((old & md->mask) + size != ((old + size) & md->mask)) {
                        unsigned int offset = old;
                        unsigned int len = min(sizeof(*event), size), cpy;
                        void *dst = md->event_copy;

                        do {
                                cpy = min(md->mask + 1 - (offset & md->mask), len);
                                memcpy(dst, &data[offset & md->mask], cpy);
                                offset += cpy;
                                dst += cpy;
                                len -= cpy;
                        } while (len);

                        event = (union perf_event *) md->event_copy;
                }

                old += size;
        }

        md->prev = old;

        return event;
}
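/*
 * Typical consumer loop for one mmap (sketch; the real ones live in
 * the builtins):
 *
 *      union perf_event *event;
 *
 *      while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *              ...e.g. perf_evlist__parse_sample(evlist, event, &sample)...
 *              perf_evlist__mmap_consume(evlist, idx);
 *      }
 *
 * The consume step publishes the new tail so the kernel may reuse the
 * space just read.
 */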
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
        if (!evlist->overwrite) {
                struct perf_mmap *md = &evlist->mmap[idx];
                unsigned int old = md->prev;

                perf_mmap__write_tail(md, old);
        }
}
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
        if (evlist->mmap[idx].base != NULL) {
                munmap(evlist->mmap[idx].base, evlist->mmap_len);
                evlist->mmap[idx].base = NULL;
        }
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
        int i;

        for (i = 0; i < evlist->nr_mmaps; i++)
                __perf_evlist__munmap(evlist, i);

        free(evlist->mmap);
        evlist->mmap = NULL;
}
static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
        evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
        if (cpu_map__empty(evlist->cpus))
                evlist->nr_mmaps = thread_map__nr(evlist->threads);
        evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
        return evlist->mmap != NULL ? 0 : -ENOMEM;
}
static int __perf_evlist__mmap(struct perf_evlist *evlist,
                               int idx, int prot, int mask, int fd)
{
        evlist->mmap[idx].prev = 0;
        evlist->mmap[idx].mask = mask;
        evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
                                      MAP_SHARED, fd, 0);
        if (evlist->mmap[idx].base == MAP_FAILED) {
                pr_debug2("failed to mmap perf event ring buffer, error %d\n",
                          errno);
                evlist->mmap[idx].base = NULL;
                return -1;
        }

        perf_evlist__add_pollfd(evlist, fd);
        return 0;
}
static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
                                       int prot, int mask, int cpu, int thread,
                                       int *output)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                int fd = FD(evsel, cpu, thread);

                if (*output == -1) {
                        *output = fd;
                        if (__perf_evlist__mmap(evlist, idx, prot, mask,
                                                *output) < 0)
                                return -1;
                } else {
                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
                                return -1;
                }

                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
                        return -1;
        }

        return 0;
}
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
                                     int mask)
{
        int cpu, thread;
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);

        pr_debug2("perf event ring buffer mmapped per cpu\n");
        for (cpu = 0; cpu < nr_cpus; cpu++) {
                int output = -1;

                for (thread = 0; thread < nr_threads; thread++) {
                        if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask,
                                                        cpu, thread, &output))
                                goto out_unmap;
                }
        }

        return 0;

out_unmap:
        for (cpu = 0; cpu < nr_cpus; cpu++)
                __perf_evlist__munmap(evlist, cpu);
        return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
                                        int mask)
{
        int thread;
        int nr_threads = thread_map__nr(evlist->threads);

        pr_debug2("perf event ring buffer mmapped per thread\n");
        for (thread = 0; thread < nr_threads; thread++) {
                int output = -1;

                if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0,
                                                thread, &output))
                        goto out_unmap;
        }

        return 0;

out_unmap:
        for (thread = 0; thread < nr_threads; thread++)
                __perf_evlist__munmap(evlist, thread);
        return -1;
}
static size_t perf_evlist__mmap_size(unsigned long pages)
{
        /* 512 kiB: default amount of unprivileged mlocked memory */
        if (pages == UINT_MAX)
                pages = (512 * 1024) / page_size;
        else if (!is_power_of_2(pages))
                return 0;

        return (pages + 1) * page_size;
}
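/*
 * Worked example: with 4 kiB pages the UINT_MAX default above becomes
 * (512 * 1024) / 4096 = 128 data pages, and the returned length covers
 * 129 pages: the extra page at the front holds the control header
 * (struct perf_event_mmap_page), while the power-of-2 sized remainder
 * is the ring buffer proper.
 */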
static long parse_pages_arg(const char *str, unsigned long min,
                            unsigned long max)
{
        unsigned long pages, val;
        static struct parse_tag tags[] = {
                { .tag  = 'B', .mult = 1       },
                { .tag  = 'K', .mult = 1 << 10 },
                { .tag  = 'M', .mult = 1 << 20 },
                { .tag  = 'G', .mult = 1 << 30 },
                { .tag  = 0 },
        };

        if (str == NULL)
                return -EINVAL;

        val = parse_tag_value(str, tags);
        if (val != (unsigned long) -1) {
                /* we got file size value */
                pages = PERF_ALIGN(val, page_size) / page_size;
        } else {
                /* we got pages count value */
                char *eptr;

                pages = strtoul(str, &eptr, 10);
                if (*eptr != '\0')
                        return -EINVAL;
        }

        if ((pages == 0) && (min == 0)) {
                /* leave number of pages at 0 */
        } else if (pages < (1UL << 31) && !is_power_of_2(pages)) {
                /* round pages up to next power of 2 */
                pages = next_pow2(pages);
                pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
                        pages * page_size, pages);
        }

        if (pages > max)
                return -EINVAL;

        return pages;
}
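/*
 * Example: "129K" goes through the tag table to 132096 bytes, which
 * PERF_ALIGN() turns into 33 pages (assuming 4 kiB pages), and the
 * rounding above then bumps that to the next power of 2, 64 pages; a
 * bare "33" takes the strtoul() path and likewise ends up as 64 pages.
 */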
int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
                                  int unset __maybe_unused)
{
        unsigned int *mmap_pages = opt->value;
        unsigned long max = UINT_MAX;
        long pages;

        /* cap max so that max * page_size still fits in a size_t */
        if (max > SIZE_MAX / page_size)
                max = SIZE_MAX / page_size;

        pages = parse_pages_arg(str, 1, max);
        if (pages < 0) {
                pr_err("Invalid argument for --mmap_pages/-m\n");
                return -1;
        }

        *mmap_pages = pages;
        return 0;
}
/**
 * perf_evlist__mmap - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
                      bool overwrite)
{
        struct perf_evsel *evsel;
        const struct cpu_map *cpus = evlist->cpus;
        const struct thread_map *threads = evlist->threads;
        int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

        if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
                return -ENOMEM;

        if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
                return -ENOMEM;

        evlist->overwrite = overwrite;
        evlist->mmap_len = perf_evlist__mmap_size(pages);
        pr_debug("mmap size %zuB\n", evlist->mmap_len);
        mask = evlist->mmap_len - page_size - 1;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
                    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
                        return -ENOMEM;
        }

        if (cpu_map__empty(cpus))
                return perf_evlist__mmap_per_thread(evlist, prot, mask);

        return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
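/*
 * The mask computed above is (data area size - 1): mmap_len minus the
 * control page, minus 1. Because the data area is a power of 2 in
 * size, "position & mask" is how both the kernel and
 * perf_evlist__mmap_read() wrap offsets around the ring buffer.
 */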
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
        evlist->threads = thread_map__new_str(target->pid, target->tid,
                                              target->uid);

        if (evlist->threads == NULL)
                return -1;

        if (target->force_per_cpu)
                evlist->cpus = cpu_map__new(target->cpu_list);
        else if (target__has_task(target))
                evlist->cpus = cpu_map__dummy_new();
        else if (!target__has_cpu(target) && !target->uses_mmap)
                evlist->cpus = cpu_map__dummy_new();
        else
                evlist->cpus = cpu_map__new(target->cpu_list);

        if (evlist->cpus == NULL)
                goto out_delete_threads;

        return 0;

out_delete_threads:
        thread_map__delete(evlist->threads);
        return -1;
}
void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
        cpu_map__delete(evlist->cpus);
        thread_map__delete(evlist->threads);
        evlist->cpus = NULL;
        evlist->threads = NULL;
}
int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int err = 0;
        const int ncpus = cpu_map__nr(evlist->cpus),
                  nthreads = thread_map__nr(evlist->threads);

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (evsel->filter == NULL)
                        continue;

                err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
                if (err)
                        break;
        }

        return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
        struct perf_evsel *evsel;
        int err = 0;
        const int ncpus = cpu_map__nr(evlist->cpus),
                  nthreads = thread_map__nr(evlist->threads);

        list_for_each_entry(evsel, &evlist->entries, node) {
                err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
                if (err)
                        break;
        }

        return err;
}
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
        struct perf_evsel *pos;

        if (evlist->nr_entries == 1)
                return true;

        if (evlist->id_pos < 0 || evlist->is_pos < 0)
                return false;

        list_for_each_entry(pos, &evlist->entries, node) {
                if (pos->id_pos != evlist->id_pos ||
                    pos->is_pos != evlist->is_pos)
                        return false;
        }

        return true;
}
u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        if (evlist->combined_sample_type)
                return evlist->combined_sample_type;

        list_for_each_entry(evsel, &evlist->entries, node)
                evlist->combined_sample_type |= evsel->attr.sample_type;

        return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
        evlist->combined_sample_type = 0;
        return __perf_evlist__combined_sample_type(evlist);
}
bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
        u64 read_format = first->attr.read_format;
        u64 sample_type = first->attr.sample_type;

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (read_format != pos->attr.read_format)
                        return false;
        }

        /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
        if ((sample_type & PERF_SAMPLE_READ) &&
            !(read_format & PERF_FORMAT_ID)) {
                return false;
        }

        return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        return first->attr.read_format;
}
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        struct perf_sample *data;
        u64 sample_type;
        u16 size = 0;

        if (!first->attr.sample_id_all)
                goto out;

        sample_type = first->attr.sample_type;

        if (sample_type & PERF_SAMPLE_TID)
                size += sizeof(data->tid) * 2;

        if (sample_type & PERF_SAMPLE_TIME)
                size += sizeof(data->time);

        if (sample_type & PERF_SAMPLE_ID)
                size += sizeof(data->id);

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                size += sizeof(data->stream_id);

        if (sample_type & PERF_SAMPLE_CPU)
                size += sizeof(data->cpu) * 2;

        if (sample_type & PERF_SAMPLE_IDENTIFIER)
                size += sizeof(data->id);
out:
        return size;
}
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_id_all != pos->attr.sample_id_all)
                        return false;
        }

        return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        return first->attr.sample_id_all;
}
void perf_evlist__set_selected(struct perf_evlist *evlist,
                               struct perf_evsel *evsel)
{
        evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int ncpus = cpu_map__nr(evlist->cpus);
        int nthreads = thread_map__nr(evlist->threads);

        list_for_each_entry_reverse(evsel, &evlist->entries, node)
                perf_evsel__close(evsel, ncpus, nthreads);
}
int perf_evlist__open(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int err;

        perf_evlist__update_id_pos(evlist);

        list_for_each_entry(evsel, &evlist->entries, node) {
                err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
                if (err < 0)
                        goto out_err;
        }

        return 0;
out_err:
        perf_evlist__close(evlist);
        errno = -err;
        return err;
}
int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
                                  const char *argv[], bool pipe_output,
                                  bool want_signal)
{
        int child_ready_pipe[2], go_pipe[2];
        char bf;

        if (pipe(child_ready_pipe) < 0) {
                perror("failed to create 'ready' pipe");
                return -1;
        }

        if (pipe(go_pipe) < 0) {
                perror("failed to create 'go' pipe");
                goto out_close_ready_pipe;
        }

        evlist->workload.pid = fork();
        if (evlist->workload.pid < 0) {
                perror("failed to fork");
                goto out_close_pipes;
        }

        if (!evlist->workload.pid) {
                if (pipe_output)
                        dup2(2, 1);

                signal(SIGTERM, SIG_DFL);

                close(child_ready_pipe[0]);
                close(go_pipe[1]);
                fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

                /*
                 * Tell the parent we're ready to go
                 */
                close(child_ready_pipe[1]);

                /*
                 * Wait until the parent tells us to go.
                 */
                if (read(go_pipe[0], &bf, 1) == -1)
                        perror("unable to read pipe");

                execvp(argv[0], (char **)argv);

                perror(argv[0]);
                if (want_signal)
                        kill(getppid(), SIGUSR1);
                exit(-1);
        }

        if (target__none(target))
                evlist->threads->map[0] = evlist->workload.pid;

        close(child_ready_pipe[1]);
        close(go_pipe[0]);
        /*
         * wait for child to settle
         */
        if (read(child_ready_pipe[0], &bf, 1) == -1) {
                perror("unable to read pipe");
                goto out_close_pipes;
        }

        fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
        evlist->workload.cork_fd = go_pipe[1];
        close(child_ready_pipe[0]);
        return 0;

out_close_pipes:
        close(go_pipe[0]);
        close(go_pipe[1]);
out_close_ready_pipe:
        close(child_ready_pipe[0]);
        close(child_ready_pipe[1]);
        return -1;
}
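/*
 * The pipe handshake above, step by step (a summary, not extra API):
 * the child closes the write end of the "ready" pipe, which wakes the
 * parent's read(); the child then blocks reading the "go" pipe while
 * the parent opens and mmaps the counters; perf_evlist__start_workload()
 * finally writes to (and closes) cork_fd, releasing the child into
 * execvp(). The workload therefore cannot exec before the events are
 * in place.
 */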
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
        if (evlist->workload.cork_fd > 0) {
                char bf = 0;
                int ret;
                /*
                 * Remove the cork, let it rip!
                 */
                ret = write(evlist->workload.cork_fd, &bf, 1);
                if (ret < 0)
                        perror("unable to write to pipe");

                close(evlist->workload.cork_fd);
                return ret;
        }

        return 0;
}
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
                              struct perf_sample *sample)
{
        struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

        if (!evsel)
                return -EFAULT;
        return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
        struct perf_evsel *evsel;
        size_t printed = 0;

        list_for_each_entry(evsel, &evlist->entries, node) {
                printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
                                   perf_evsel__name(evsel));
        }

        return printed + fprintf(fp, "\n");
}
int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
                             int err, char *buf, size_t size)
{
        char sbuf[128];

        switch (err) {
        case ENOENT:
                scnprintf(buf, size, "%s",
                          "Error:\tUnable to find debugfs\n"
                          "Hint:\tWas your kernel compiled with debugfs support?\n"
                          "Hint:\tIs the debugfs filesystem mounted?\n"
                          "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
                break;
        case EACCES:
                scnprintf(buf, size,
                          "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
                          "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
                          debugfs_mountpoint, debugfs_mountpoint);
                break;
        default:
                scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
                break;
        }

        return 0;
}
int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
                               int err, char *buf, size_t size)
{
        int printed, value;
        char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));

        switch (err) {
        case EACCES:
        case EPERM:
                printed = scnprintf(buf, size,
                                    "Error:\t%s.\n"
                                    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

                if (filename__read_int("/proc/sys/kernel/perf_event_paranoid", &value))
                        break;

                printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

                if (value >= 2) {
                        printed += scnprintf(buf + printed, size - printed,
                                             "For your workloads it needs to be <= 1\nHint:\t");
                }
                printed += scnprintf(buf + printed, size - printed,
                                     "For system wide tracing it needs to be set to -1");

                printed += scnprintf(buf + printed, size - printed,
                                     ".\nHint:\tThe current value is %d.", value);
                break;
        default:
                scnprintf(buf, size, "%s", emsg);
                break;
        }

        return 0;
}