// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
#include "util.h" /* page_size */
size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}
/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
			unsigned int offset = *startp;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			/* Copy the wrapped event into event_copy, chunk by chunk. */
			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		*startp += size;
	}

	return event;
}
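/*
 * Worked example of the boundary-straddle handling above (values are made
 * up, for illustration only): with map->mask == 0xffff (a 64KiB data area)
 * and *startp == 0xfff8, a 0x20-byte event occupies [0xfff8, 0x10018).
 * Since (0xfff8 & 0xffff) + 0x20 differs from (0xfff8 + 0x20) & 0xffff, the
 * event wraps: the copy loop moves the 8 bytes at the end of the buffer and
 * then the remaining 0x18 bytes from its start into map->event_copy, and the
 * returned pointer refers to that linearized copy rather than the ring
 * buffer itself.
 */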
/*
 * Read event from ring buffer one by one.
 * Return one event for each call.
 *
 * Usage:
 * perf_mmap__read_init()
 * while(event = perf_mmap__read_event()) {
 *	//process the event
 *	perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 */
union perf_event *perf_mmap__read_event(struct perf_mmap *map)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	/* non-overwrite doesn't pause the ringbuffer */
	if (!map->overwrite)
		map->end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, &map->start, map->end);

	if (!map->overwrite)
		map->prev = map->start;

	return event;
}
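/*
 * A more concrete sketch of the loop documented above (not part of this
 * file; "handle_event" stands in for whatever callback the caller provides):
 *
 *	union perf_event *event;
 *
 *	if (perf_mmap__read_init(map) < 0)
 *		return;			// nothing to read yet, or unmapped
 *	while ((event = perf_mmap__read_event(map)) != NULL) {
 *		handle_event(event);
 *		perf_mmap__consume(map);
 *	}
 *	perf_mmap__read_done(map);	// needed for overwrite mode
 */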
static bool perf_mmap__empty(struct perf_mmap *map)
{
	return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}
void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}
void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}
void perf_mmap__consume(struct perf_mmap *map)
{
	if (!map->overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct perf_evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}
#ifdef HAVE_AIO_SUPPORT
static int perf_mmap__aio_enabled(struct perf_mmap *map)
{
	return map->aio.nr_cblocks > 0;
}

#ifdef HAVE_LIBNUMA_SUPPORT
static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
{
	map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
		return -1;
	}

	return 0;
}

static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
{
	if (map->aio.data[idx]) {
		munmap(map->aio.data[idx], perf_mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
}

static int perf_mmap__aio_bind(struct perf_mmap *map, int idx, int cpu, int affinity)
{
	void *data;
	size_t mmap_len;
	unsigned long node_mask;

	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
		data = map->aio.data[idx];
		mmap_len = perf_mmap__mmap_len(map);
		node_mask = 1UL << cpu__get_node(cpu);
		if (mbind(data, mmap_len, MPOL_BIND, &node_mask, 1, 0)) {
			pr_err("Failed to bind [%p-%p] AIO buffer to node %d: error %m\n",
				data, data + mmap_len, cpu__get_node(cpu));
			return -1;
		}
	}

	return 0;
}
#else /* !HAVE_LIBNUMA_SUPPORT */
static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
{
	map->aio.data[idx] = malloc(perf_mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;

	return 0;
}

static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
{
	zfree(&(map->aio.data[idx]));
}

static int perf_mmap__aio_bind(struct perf_mmap *map __maybe_unused, int idx __maybe_unused,
		int cpu __maybe_unused, int affinity __maybe_unused)
{
	return 0;
}
#endif
static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
{
	int delta_max, i, prio, ret;

	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			pr_debug2("failed to allocate data buffer, error %m\n");
			return -1;
		}
		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			ret = perf_mmap__aio_alloc(map, i);
			if (ret == -1) {
				pr_debug2("failed to allocate data buffer area, error %m");
				return -1;
			}
			ret = perf_mmap__aio_bind(map, i, map->cpu, mp->affinity);
			if (ret == -1)
				return -1;
			/*
			 * A cblock.aio_fildes value different from -1
			 * denotes a started aio write operation on the
			 * cblock, so an explicit record__aio_sync() call is
			 * required before the cblock can be reused.
			 */
			map->aio.cblocks[i].aio_fildes = -1;
			/*
			 * Allocate cblocks with decreasing priority deltas
			 * to get faster aio write system calls: queued
			 * requests are kept in separate per-prio queues, so
			 * adding a new request iterates over a shorter
			 * per-prio list. Blocks with numbers higher than
			 * _SC_AIO_PRIO_DELTA_MAX get priority 0.
			 */
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}

	return 0;
}
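/*
 * Example of the priority assignment above (numbers are illustrative only):
 * if sysconf(_SC_AIO_PRIO_DELTA_MAX) returns 20 and nr_cblocks is 4, the
 * cblocks get aio_reqprio values 20, 19, 18 and 17.  If nr_cblocks exceeded
 * delta_max + 1, the remaining cblocks would be clamped to priority 0.
 */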
static void perf_mmap__aio_munmap(struct perf_mmap *map)
{
	int i;

	for (i = 0; i < map->aio.nr_cblocks; ++i)
		perf_mmap__aio_free(map, i);
	if (map->aio.data)
		zfree(&map->aio.data);
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
static int perf_mmap__aio_enabled(struct perf_mmap *map __maybe_unused)
{
	return 0;
}

static int perf_mmap__aio_mmap(struct perf_mmap *map __maybe_unused,
			       struct mmap_params *mp __maybe_unused)
{
	return 0;
}

static void perf_mmap__aio_munmap(struct perf_mmap *map __maybe_unused)
{
}
#endif
void perf_mmap__munmap(struct perf_mmap *map)
{
	perf_mmap__aio_munmap(map);
	if (map->data != NULL) {
		munmap(map->data, perf_mmap__mmap_len(map));
		map->data = NULL;
	}
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
static void build_node_mask(int node, cpu_set_t *mask)
{
	int c, cpu, nr_cpus;
	const struct cpu_map *cpu_map = NULL;

	cpu_map = cpu_map__online();
	if (!cpu_map)
		return;

	nr_cpus = cpu_map__nr(cpu_map);
	for (c = 0; c < nr_cpus; c++) {
		cpu = cpu_map->map[c]; /* map c index to online cpu index */
		if (cpu__get_node(cpu) == node)
			CPU_SET(cpu, mask);
	}
}
static void perf_mmap__setup_affinity_mask(struct perf_mmap *map, struct mmap_params *mp)
{
	CPU_ZERO(&map->affinity_mask);
	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
		build_node_mask(cpu__get_node(map->cpu), &map->affinity_mask);
	else if (mp->affinity == PERF_AFFINITY_CPU)
		CPU_SET(map->cpu, &map->affinity_mask);
}
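/*
 * The mask built above is only stored here; applying it (for example via
 * sched_setaffinity() before draining the map, as perf record does) is left
 * to the caller.  This note summarizes how the field is meant to be used,
 * not behaviour implemented in this file.
 */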
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
{
	/*
	 * The last one will be done at perf_mmap__consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;
	map->cpu = cpu;

	perf_mmap__setup_affinity_mask(map, mp);

	map->flush = mp->flush;

	map->comp_level = mp->comp_level;

	if (map->comp_level && !perf_mmap__aio_enabled(map)) {
		map->data = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
		if (map->data == MAP_FAILED) {
			pr_debug2("failed to mmap data buffer, error %d\n",
				  errno);
			map->data = NULL;
			return -1;
		}
	}

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return perf_mmap__aio_mmap(map, mp);
}
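/*
 * Sketch of how a caller might fill struct mmap_params before calling
 * perf_mmap__mmap() (illustrative only; only fields already used in this
 * file are assumed, "nr_data_pages" is a made-up name):
 *
 *	struct mmap_params mp = {
 *		.prot = PROT_READ | PROT_WRITE,
 *		.mask = (nr_data_pages * page_size) - 1,  // power-of-2 data area
 *	};
 *
 * With that mask, perf_mmap__mmap_len() evaluates to
 * nr_data_pages * page_size + page_size: the data area plus the leading
 * perf_event_mmap_page control page.
 */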
static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = *start;
	int size = mask + 1;

	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
	pheader = (struct perf_event_header *)(buf + (*start & mask));

	while (true) {
		if (evt_head - *start >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - *start > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}

	WARN_ONCE(1, "Shouldn't get here\n");

	return -1;
}
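/*
 * Informal summary of the scan above: starting at *start, the loop walks
 * forward header by header through the full (backward) ring buffer.  It
 * stops either when it reaches a zero-sized header (the writer never got
 * that far, so *end is set to the last valid position) or when it has
 * walked a whole buffer's worth of bytes, in which case it rewinds the
 * event that overshot the wrap point and reports that position as *end.
 */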
/*
 * Report the start and end of the available data in ringbuffer
 */
static int __perf_mmap__read_init(struct perf_mmap *md)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	md->start = md->overwrite ? head : old;
	md->end = md->overwrite ? old : head;

	if ((md->end - md->start) < md->flush)
		return -EAGAIN;

	size = md->end - md->start;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!md->overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md);
			return -EAGAIN;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
			return -EINVAL;
	}

	return 0;
}
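/*
 * Illustration of the start/end computation above (made-up numbers): in
 * non-overwrite mode with old == 0x100 and head == 0x180, the window is
 * [start = 0x100, end = 0x180) and the tail advances as events are
 * consumed.  In overwrite (backward) mode the roles flip: start = head and
 * end = old, and if end - start exceeds mask + 1 the buffer has wrapped, so
 * overwrite_rb_find_range() is used to shrink the window to data that is
 * still intact.
 */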
int perf_mmap__read_init(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return -ENOENT;

	return __perf_mmap__read_init(map);
}
int perf_mmap__push(struct perf_mmap *md, void *to,
		    int push(struct perf_mmap *map, void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(md);
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(md);
	if (rc < 0)
		return (rc == -EAGAIN) ? 1 : -1;

	size = md->end - md->start;

	if ((md->start & md->mask) + size != (md->end & md->mask)) {
		buf = &data[md->start & md->mask];
		size = md->mask + 1 - (md->start & md->mask);
		md->start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->start & md->mask];
	size = md->end - md->start;
	md->start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md);
out:
	return rc;
}
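/*
 * Example of the two-chunk copy above (illustrative values): with
 * md->mask == 0xffff, md->start == 0xff00 and md->end == 0x10100, the
 * available data wraps around the end of the buffer, so push() is called
 * twice: first with the 0x100 bytes at offsets [0xff00, 0x10000) and then
 * with the 0x100 bytes at offsets [0x0, 0x100).  When the window does not
 * wrap, the first branch is skipped and a single push() covers it all.
 */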
/*
 * Mandatory for overwrite mode.
 * The direction of overwrite mode is backward.
 * The last perf_mmap__read() will set tail to map->prev.
 * Need to correct map->prev to head, which is the end of the next read.
 */
void perf_mmap__read_done(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return;

	map->prev = perf_mmap__read_head(map);
}