/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <api/fs/fs.h>
#include "thread_map.h"
#include "parse-events.h"
#include <subcmd/parse-options.h>
#include <sys/ioctl.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
static void perf_mmap__munmap(struct perf_mmap *map);
static void perf_mmap__put(struct perf_mmap *map);
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}
struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}
struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}
struct perf_evlist *perf_evlist__new_dummy(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_dummy(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}
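
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * consumer pairs one of the constructors above with perf_evlist__delete(),
 * roughly:
 *
 *	struct perf_evlist *evlist = perf_evlist__new_dummy();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	// ... perf_evlist__open(), perf_evlist__mmap(), consume events ...
 *	perf_evlist__delete(evlist);
 */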
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}
static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}
static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		pos->evlist = NULL;
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}
void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
	fdarray__exit(&evlist->pollfd);
}
void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__put(evlist->cpus);
	thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs) so
	 * keep it, if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evsel->own_cpus);
	}

	thread_map__put(evsel->threads);
	evsel->threads = thread_map__get(evlist->threads);
}
static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	entry->evlist = evlist;
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);

	__perf_evlist__propagate_maps(evlist, entry);
}
void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
{
	evsel->evlist = NULL;
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list)
{
	struct perf_evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->node);
		perf_evlist__add(evlist, evsel);
	}
}
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}
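
/*
 * Worked example (illustrative, not from the original file): for a list
 * holding evsels with idx 2, 3 and 4, the leader is the idx == 2 entry and
 * leader->nr_members = 4 - 2 + 1 = 3, i.e. the group size counts the
 * leader itself plus its siblings.
 */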
void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}
void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
{
	attr->precise_ip = 3;

	while (attr->precise_ip != 0) {
		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);

		if (fd != -1) {
			close(fd);
			break;
		}

		--attr->precise_ip;
	}
}
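
/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *	struct perf_event_attr attr = {
 *		.type   = PERF_TYPE_HARDWARE,
 *		.config = PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *
 *	perf_event_attr__set_max_precise_ip(&attr);
 *	// attr.precise_ip now holds the most precise skid level the running
 *	// kernel accepts (3 down to 0), i.e. the strongest ":p" modifier.
 */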
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = perf_evsel__new_cycles();

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}
int perf_evlist__add_dummy(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}
int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}
struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}
int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}
void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__disable(pos);
	}

	evlist->enabled = false;
}
void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__enable(pos);
	}

	evlist->enabled = true;
}
void perf_evlist__toggle_enable(struct perf_evlist *evlist)
{
	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
}
static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);

		if (err)
			return err;
	}
	return 0;
}
static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);

		if (err)
			return err;
	}
	return 0;
}
int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}
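
/*
 * Worked example (illustrative): with 4 cpus, 2 threads, one regular evsel
 * and one system_wide evsel, the loop above reserves 4 * 2 + 4 = 12 pollfd
 * slots: a system wide event contributes one fd per cpu, the others one fd
 * per cpu per thread.
 */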
static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
				     struct perf_mmap *map, short revent)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
	/*
	 * Save the idx so that when we filter out fds POLLHUP'ed we can
	 * close the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = map;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}
int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
}
static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}
int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}
int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
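
/*
 * Layout note (illustrative): with read_format = PERF_FORMAT_ID |
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING, the
 * legacy read() above fills read_data as
 *
 *	read_data[0] = counter value
 *	read_data[1] = time_enabled
 *	read_data[2] = time_running
 *	read_data[3] = id
 *
 * which is why id_idx starts at 1 and is bumped once per TOTAL_TIME_* bit.
 */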
static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = thread_map__pid(evlist->threads, thread);
	else
		sid->tid = -1;
}
struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1 || !id)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}
struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
						u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	return NULL;
}
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}
struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
					    union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}
static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
{
	int i;

	if (!evlist->backward_mmap)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		int fd = evlist->backward_mmap[i].fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}
static int perf_evlist__pause(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

static int perf_evlist__resume(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}
/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *
perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
		u64 end, u64 *prev)
{
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;
	int diff = end - start;

	if (check_messup) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the 'end', we got messed up.
		 *
		 * In either case, truncate and restart at 'end'.
		 */
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * 'end' points to a known good entry, start there.
			 */
			start = end;
			diff = 0;
		}
	}

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[start & md->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size) {
			event = NULL;
			goto broken_event;
		}

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((start & md->mask) + size != ((start + size) & md->mask)) {
			unsigned int offset = start;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		start += size;
	}

broken_event:
	if (prev)
		*prev = start;

	return event;
}
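
/*
 * Worked example (illustrative): with a 64KiB data area (md->mask == 0xffff),
 * a 24-byte event starting at start == 0xfff8 straddles the boundary:
 * (0xfff8 & mask) + 24 != (0xfff8 + 24) & mask, so the do/while above copies
 * the first 8 bytes from the end of the buffer and the remaining 16 from its
 * beginning into md->event_copy, yielding one contiguous record.
 */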
union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
{
	u64 head;
	u64 old = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);

	return perf_mmap__read(md, check_messup, old, head, &md->prev);
}
union perf_event *
perf_mmap__read_backward(struct perf_mmap *md)
{
	u64 head, end;
	u64 start = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);
	if (!head)
		return NULL;

	/*
	 * The 'head' pointer starts at 0 and the kernel subtracts
	 * sizeof(record) from it each time it writes a record, so 'head' is
	 * effectively negative. The 'end' pointer is made manually by adding
	 * the size of the ring buffer to 'head', meaning the valid data we
	 * can read spans the whole ring buffer. If 'end' is positive, the
	 * ring buffer has not completely filled yet, so 'end' must be
	 * adjusted to 0.
	 *
	 * However, since both 'head' and 'end' are unsigned, we can't simply
	 * compare 'end' against 0. Instead we compare '-head' with the size
	 * of the ring buffer, where '-head' is the number of bytes the
	 * kernel has written to the ring buffer.
	 */
	if (-head < (u64)(md->mask + 1))
		end = 0;
	else
		end = head + md->mask + 1;

	return perf_mmap__read(md, false, start, end, &md->prev);
}
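
/*
 * Worked example (illustrative): for a 64KiB backward buffer
 * (md->mask + 1 == 0x10000), after the kernel has written 0x30 bytes,
 * head == (u64)-0x30, so -head == 0x30 < 0x10000 and 'end' is set to 0:
 * only [head, 0) is valid. Once -head grows past the buffer size the buffer
 * has wrapped and the whole window [head, head + 0x10000) is readable.
 */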
union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * Checking messup is required for a forward overwritable ring buffer:
	 * memory pointed to by md->prev can be overwritten in this case.
	 * No need for a read-write ring buffer: the kernel stops outputting
	 * when it hits md->prev (perf_mmap__consume()).
	 */
	return perf_mmap__read_forward(md, evlist->overwrite);
}
union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * No need to check messup for a backward ring buffer:
	 * we can always read arbitrarily long data from a backward
	 * ring buffer unless we forget to pause it before reading.
	 */
	return perf_mmap__read_backward(md);
}
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	return perf_evlist__mmap_read_forward(evlist, idx);
}
void perf_mmap__read_catchup(struct perf_mmap *md)
{
	u64 head;

	if (!refcount_read(&md->refcnt))
		return;

	head = perf_mmap__read_head(md);
	md->prev = head;
}
void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
{
	perf_mmap__read_catchup(&evlist->mmap[idx]);
}
static bool perf_mmap__empty(struct perf_mmap *md)
{
	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
}
static void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}
static void perf_mmap__put(struct perf_mmap *md)
{
	BUG_ON(md->base && refcount_read(&md->refcnt) == 0);

	if (refcount_dec_and_test(&md->refcnt))
		perf_mmap__munmap(md);
}
void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
{
	if (!overwrite) {
		u64 old = md->prev;

		perf_mmap__write_tail(md, old);
	}

	if (refcount_read(&md->refcnt) == 1 && perf_mmap__empty(md))
		perf_mmap__put(md);
}
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
}
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(
			struct auxtrace_mmap_params *mp __maybe_unused,
			off_t auxtrace_offset __maybe_unused,
			unsigned int auxtrace_pages __maybe_unused,
			bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(
			struct auxtrace_mmap_params *mp __maybe_unused,
			struct perf_evlist *evlist __maybe_unused,
			int idx __maybe_unused,
			bool per_cpu __maybe_unused)
{
}
static void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);

	if (evlist->backward_mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->backward_mmap[i]);
}
void perf_evlist__munmap(struct perf_evlist *evlist)
{
	perf_evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
}
static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	int i;
	struct perf_mmap *map;

	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		map[i].fd = -1;
		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_evlist__mmap_consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		refcount_set(&map[i].refcnt, 0);
	}
	return map;
}

struct mmap_params {
	int prot;
	int mask;
	struct auxtrace_mmap_params auxtrace_mp;
};
static int perf_mmap__mmap(struct perf_mmap *map,
			   struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}
static bool
perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
			 struct perf_evsel *evsel)
{
	if (evsel->attr.write_backward)
		return false;
	return true;
}
static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu_idx,
				       int thread, int *_output, int *_output_backward)
{
	struct perf_evsel *evsel;
	int revent;
	int evlist_cpu = cpu_map__cpu(evlist->cpus, cpu_idx);

	evlist__for_each_entry(evlist, evsel) {
		struct perf_mmap *maps = evlist->mmap;
		int *output = _output;
		int fd;
		int cpu;

		if (evsel->attr.write_backward) {
			output = _output_backward;
			maps = evlist->backward_mmap;

			if (!maps) {
				maps = perf_evlist__alloc_mmap(evlist);
				if (!maps)
					return -1;
				evlist->backward_mmap = maps;
				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
			}
		}

		if (evsel->system_wide && thread)
			continue;

		cpu = cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			if (perf_mmap__mmap(&maps[idx], mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(&maps[idx]);
		}

		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;

		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid.  Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
			perf_mmap__put(&maps[idx]);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output, &output_backward))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output, &output_backward))
			goto out_unmap;
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}
unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but lets not
		 * die yet, as we have a reference value.
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}
size_t perf_evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}
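
/*
 * Worked example (illustrative): with 4KiB pages, pages == 128 gives
 * (128 + 1) * 4096 bytes; the extra page is the kernel's control page
 * (struct perf_event_mmap_page) that precedes the data area, which is also
 * why perf_evlist__mmap_ex() computes mp.mask as mmap_len - page_size - 1.
 */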
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;

		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}
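
/*
 * Examples (illustrative): "512K" is parsed as a size and becomes
 * 512KiB / page_size = 128 pages (with 4KiB pages); a bare "100" is taken
 * as a page count and rounded up to the next power of two, 128, with a
 * pr_info() notice; "0" is only accepted when min == 0.
 */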
int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}
int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}
/**
 * perf_evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
			 bool overwrite, unsigned int auxtrace_pages,
			 bool auxtrace_overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	struct mmap_params mp = {
		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
	};

	if (!evlist->mmap)
		evlist->mmap = perf_evlist__alloc_mmap(evlist);
	if (!evlist->mmap)
		return -ENOMEM;

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mp.mask = evlist->mmap_len - page_size - 1;

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, &mp);

	return perf_evlist__mmap_per_cpu(evlist, &mp);
}
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
}
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
	struct cpu_map *cpus;
	struct thread_map *threads;

	threads = thread_map__new_str(target->pid, target->tid, target->uid);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = cpu_map__dummy_new();
	else
		cpus = cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(evlist, cpus, threads);

	return 0;

out_delete_threads:
	thread_map__put(threads);
	return -1;
}
void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
			   struct thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it.  Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1.  If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->cpus) {
		cpu_map__put(evlist->cpus);
		evlist->cpus = cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		thread_map__put(evlist->threads);
		evlist->threads = thread_map__get(threads);
	}

	perf_evlist__propagate_maps(evlist);
}
void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
				   enum perf_event_sample_format bit)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evsel__set_sample_bit(evsel, bit);
}
void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
				     enum perf_event_sample_format bit)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evsel__reset_sample_bit(evsel, bit);
}
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus    = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * Filters only work for tracepoint events, which don't have a
		 * cpu limit, so the evlist and evsel maps should always match.
		 */
		err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}
int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = perf_evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}
int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter;
	int ret = -1;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return -1;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	ret = perf_evlist__set_filter(evlist, filter);
out_free:
	free(filter);
	return ret;
}
int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
{
	return perf_evlist__set_filter_pids(evlist, 1, &pid);
}
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}
u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}
u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}
u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->attr.branch_sample_type;
	return branch_type;
}
bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}
u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}
bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}
void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}
void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	evlist__for_each_entry_reverse(evlist, evsel) {
		int n = evsel->cpus ? evsel->cpus->nr : ncpus;

		perf_evsel__close(evsel, n, nthreads);
	}
}
static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
{
	struct cpu_map	  *cpus;
	struct thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(evlist, cpus, threads);

	return 0;

out_put:
	cpu_map__put(cpus);
out:
	return err;
}
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->threads == NULL && evlist->cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}
int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
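
/*
 * Usage sketch (illustrative, error handling elided): a caller forks the
 * workload stopped on the "go" pipe, sets up its counters, then uncorks it:
 *
 *	const char *argv[] = { "sleep", "1", NULL };
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	perf_evlist__open(evlist);
 *	perf_evlist__enable(evlist);
 *	perf_evlist__start_workload(evlist);	// writes the single byte
 */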
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
*evlist
, union perf_event
*event
,
1830 struct perf_sample
*sample
)
1832 struct perf_evsel
*evsel
= perf_evlist__event2evsel(evlist
, event
);
1836 return perf_evsel__parse_sample(evsel
, event
, sample
);
size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	evlist__for_each_entry(evlist, evsel) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}
int perf_evlist__strerror_open(struct perf_evlist *evlist,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				    "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct perf_evsel *first = perf_evlist__first(evlist);
		int max_freq;

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}
void perf_evlist__to_front(struct perf_evlist *evlist,
			   struct perf_evsel *move_evsel)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == perf_evlist__first(evlist))
		return;

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->node, &move);
	}

	list_splice(&move, &evlist->entries);
}
void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
				     struct perf_evsel *tracking_evsel)
{
	struct perf_evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}
struct perf_evsel *
perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
			       const char *str)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (strcmp(str, evsel->name) == 0)
			return evsel;
	}

	return NULL;
}
void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
				  enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->backward_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get there\n");
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		perf_evlist__pause(evlist);
		break;
	case RESUME:
		perf_evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}