/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>

#include "internal.h"

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLLIN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load of
	 * the ->data_tail and the stores of $data. If ->data_tail indicates
	 * there is no room in the buffer to store $data, we do not store it.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update -- rely on previous implied
	 * compiler barriers to force a re-read.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
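
/*
 * For illustration only: a minimal sketch of the matching userspace
 * consumer loop described by the barrier diagram above. "base" (the
 * mmap()ed area), "pc" and read_sample() are hypothetical names, and
 * READ_ONCE()/WRITE_ONCE()/smp_*() stand in for the equivalent
 * userspace primitives; the barriers are the (C) and (D) sides that
 * pair with (B) and (A) above.
 *
 *	struct perf_event_mmap_page *pc = base;
 *	u64 head = READ_ONCE(pc->data_head);
 *	u64 tail = pc->data_tail;
 *
 *	smp_rmb();				// (C), matches (B)
 *	while (tail != head)
 *		tail += read_sample(base, tail);	// consume one record
 *	smp_mb();				// (D), matches (A)
 *	WRITE_ONCE(pc->data_tail, tail);
 */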

static bool __always_inline
ring_buffer_has_space(unsigned long head, unsigned long tail,
		      unsigned long data_size, unsigned int size,
		      bool backward)
{
	if (!backward)
		return CIRC_SPACE(head, tail, data_size) >= size;
	else
		return CIRC_SPACE(tail, head, data_size) >= size;
}

static int __always_inline
__perf_output_begin(struct perf_output_handle *handle,
		    struct perf_event *event, unsigned int size,
		    bool backward)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64 id;
		u64 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(rb->paused)) {
		if (rb->nr_pages)
			local_inc(&rb->lost);
		goto out;
	}

	handle->rb = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	do {
		tail = READ_ONCE(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite) {
			if (unlikely(!ring_buffer_has_space(head, tail,
							    perf_data_size(rb),
							    size, backward)))
				goto fail;
		}

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below, since the @tail
		 * load is required to compute the branch to fail below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * position.
		 *
		 * See perf_output_put_handle().
		 */

		if (!backward)
			head += size;
		else
			head -= size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	if (backward) {
		offset = head;
		head = (u64)(-head);
	}

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;
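
	/*
	 * Worked example of the above (illustrative numbers only): assuming
	 * PAGE_SHIFT == 12 and page_order(rb) == 0 (4KiB data pages), with
	 * nr_pages == 8 and an offset of 0x2345, page_shift is 12,
	 * handle->page is (0x2345 >> 12) & 7 == 2, the in-page offset is
	 * 0x345, and handle->size is 0x1000 - 0x345 bytes left in that page.
	 */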

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id = event->id;
		lost_event.lost = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

int perf_output_begin_forward(struct perf_output_handle *handle,
			      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, false);
}

int perf_output_begin_backward(struct perf_output_handle *handle,
			       struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, true);
}

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{

	return __perf_output_begin(handle, event, size,
				   unlikely(is_write_backward(event)));
}

unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}
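
/*
 * For illustration only: roughly the producer pattern used by the record
 * emitters in kernel/events/core.c. "record" here is a hypothetical
 * fixed-size structure starting with a struct perf_event_header whose
 * ->size has already been filled in.
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, event, record.header.size))
 *		return;			// no buffer or no space
 *
 *	perf_output_put(&handle, record);
 *	perf_output_end(&handle);
 */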

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);

	/*
	 * perf_output_begin() only checks rb->paused, therefore
	 * rb->paused must be true if we have no pages for output.
	 */
	if (!rb->nr_pages)
		rb->paused = 1;
}
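
/*
 * Illustrative example of the watermark defaults above: with a 512KiB
 * data buffer and no explicit watermark, rb->watermark ends up as 256KiB,
 * so __perf_output_begin() advances rb->wakeup, and perf_output_put_handle()
 * posts a wakeup, roughly once per 256KiB of output.
 */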

void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
{
	/*
	 * OVERWRITE is determined by perf_aux_output_end() and can't
	 * be passed in directly.
	 */
	if (WARN_ON_ONCE(flags & PERF_AUX_FLAG_OVERWRITE))
		return;

	handle->aux_flags |= flags;
}
EXPORT_SYMBOL_GPL(perf_aux_output_flag);

/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 *
 * Call this from pmu::start(); see the comment in perf_aux_output_end()
 * about its use in pmu callbacks. Both can also be called from the PMI
 * handler if needed.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *output_event = event;
	unsigned long aux_head, aux_tail;
	struct ring_buffer *rb;

	if (output_event->parent)
		output_event = output_event->parent;

	/*
	 * Since this will typically be open across pmu::add/pmu::del, we
	 * grab ring_buffer's refcount instead of holding rcu read lock
	 * to make sure it doesn't disappear under us.
	 */
	rb = ring_buffer_get(output_event);
	if (!rb)
		return NULL;

	if (!rb_has_aux(rb))
		goto err;

	/*
	 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
	 * about to get freed, so we leave immediately.
	 *
	 * Checking rb::aux_mmap_count and rb::refcount has to be done in
	 * the same order, see perf_mmap_close. Otherwise we end up freeing
	 * aux pages in this path, which is a bug, because this path can run
	 * in atomic context (in_atomic()).
	 */
	if (!atomic_read(&rb->aux_mmap_count))
		goto err;

	if (!atomic_inc_not_zero(&rb->aux_refcount))
		goto err;

	/*
	 * Nesting is not supported for AUX area, make sure nested
	 * writers are caught early
	 */
	if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
		goto err_put;

	aux_head = rb->aux_head;

	handle->rb = rb;
	handle->event = event;
	handle->head = aux_head;
	handle->size = 0;
	handle->aux_flags = 0;

	/*
	 * In overwrite mode, AUX data stores do not depend on aux_tail,
	 * therefore (A) control dependency barrier does not exist. The
	 * (B) <-> (C) ordering is still observed by the pmu driver.
	 */
	if (!rb->aux_overwrite) {
		aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
		if (aux_head - aux_tail < perf_aux_size(rb))
			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

		/*
		 * handle->size computation depends on aux_tail load; this forms a
		 * control dependency barrier separating aux_tail load from aux data
		 * store that will be enabled on successful return
		 */
		if (!handle->size) { /* A, matches D */
			event->pending_disable = 1;
			perf_output_wakeup(handle);
			local_set(&rb->aux_nest, 0);
			goto err_put;
		}
	}

	return handle->rb->aux_priv;

err_put:
	/* can't be last */
	rb_free_aux(rb);

err:
	ring_buffer_put(rb);
	handle->event = NULL;

	return NULL;
}
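
/*
 * For illustration only: a rough sketch of how an AUX-capable PMU driver
 * is expected to pair these calls from its pmu::start()/pmu::stop()
 * callbacks, per the comment above and on perf_aux_output_end() below.
 * my_pmu_start(), my_pmu_stop(), my_handle and hw_read_bytes_written()
 * are hypothetical names, not part of this API.
 *
 *	static void my_pmu_start(struct perf_event *event, int flags)
 *	{
 *		struct perf_output_handle *handle = this_cpu_ptr(&my_handle);
 *		void *buf;
 *
 *		buf = perf_aux_output_begin(handle, event);
 *		if (!buf)
 *			return;		// no AUX buffer or no space
 *
 *		// program the hardware to write at most handle->size bytes
 *		// into the AUX pages backing 'buf', starting at handle->head
 *	}
 *
 *	static void my_pmu_stop(struct perf_event *event, int flags)
 *	{
 *		struct perf_output_handle *handle = this_cpu_ptr(&my_handle);
 *
 *		// stop the hardware, then commit what it wrote
 *		perf_aux_output_end(handle, hw_read_bytes_written());
 *	}
 */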

static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb)
{
	if (rb->aux_overwrite)
		return false;

	if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
		rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
		return true;
	}

	return false;
}

/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 *
 * Note: this has to be called from pmu::stop() callback, as the assumption
 * of the AUX buffer management code is that after pmu::stop(), the AUX
 * transaction must be stopped and therefore drop the AUX reference count.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
{
	bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
	struct ring_buffer *rb = handle->rb;
	unsigned long aux_head;

	/* in overwrite mode, driver provides aux_head via handle */
	if (rb->aux_overwrite) {
		handle->aux_flags |= PERF_AUX_FLAG_OVERWRITE;

		aux_head = handle->head;
		rb->aux_head = aux_head;
	} else {
		handle->aux_flags &= ~PERF_AUX_FLAG_OVERWRITE;

		aux_head = rb->aux_head;
		rb->aux_head += size;
	}

	if (size || handle->aux_flags) {
		/*
		 * Only send RECORD_AUX if we have something useful to communicate
		 */

		perf_event_aux_event(handle->event, aux_head, size,
				     handle->aux_flags);
	}

	rb->user_page->aux_head = rb->aux_head;
	if (rb_need_aux_wakeup(rb))
		wakeup = true;

	if (wakeup) {
		if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
			handle->event->pending_disable = 1;
		perf_output_wakeup(handle);
	}

	handle->event = NULL;

	local_set(&rb->aux_nest, 0);
	/* can't be last */
	rb_free_aux(rb);
	ring_buffer_put(rb);
}

/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
	struct ring_buffer *rb = handle->rb;

	if (size > handle->size)
		return -ENOSPC;

	rb->aux_head += size;

	rb->user_page->aux_head = rb->aux_head;
	if (rb_need_aux_wakeup(rb)) {
		perf_output_wakeup(handle);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
	}

	handle->head = rb->aux_head;
	handle->size -= size;

	return 0;
}

void *perf_get_aux(struct perf_output_handle *handle)
{
	/* this is only valid between perf_aux_output_begin and *_end */
	if (!handle->event)
		return NULL;

	return handle->rb->aux_priv;
}

#define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
	struct page *page;

	if (order > MAX_ORDER)
		order = MAX_ORDER;

	do {
		page = alloc_pages_node(node, PERF_AUX_GFP, order);
	} while (!page && order--);

	if (page && order) {
		/*
		 * Communicate the allocation size to the driver:
		 * if we managed to secure a high-order allocation,
		 * set its first page's private to this order;
		 * !PagePrivate(page) means it's just a normal page.
		 */
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);
	}

	return page;
}
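
/*
 * Illustrative consumer of the PagePrivate() convention set up above (in
 * practice this is done by the AUX-capable PMU drivers' setup_aux()
 * callbacks): given one of the pages handed to setup_aux(), the size of
 * the contiguous chunk it starts can be recovered as
 *
 *	order = PagePrivate(page) ? page_private(page) : 0;
 *	chunk_size = PAGE_SIZE << order;
 */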

static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
	struct page *page = virt_to_page(rb->aux_pages[idx]);

	ClearPagePrivate(page);
	page->mapping = NULL;
	__free_page(page);
}

static void __rb_free_aux(struct ring_buffer *rb)
{
	int pg;

	/*
	 * Should never happen, the last reference should be dropped from
	 * perf_mmap_close() path, which first stops aux transactions (which
	 * in turn are the atomic holders of aux_refcount) and then does the
	 * last rb_free_aux().
	 */
	WARN_ON_ONCE(in_atomic());

	if (rb->aux_priv) {
		rb->free_aux(rb->aux_priv);
		rb->free_aux = NULL;
		rb->aux_priv = NULL;
	}

	if (rb->aux_nr_pages) {
		for (pg = 0; pg < rb->aux_nr_pages; pg++)
			rb_free_aux_page(rb, pg);

		kfree(rb->aux_pages);
		rb->aux_nr_pages = 0;
	}
}

int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
	int ret = -ENOMEM, max_order = 0;

	if (!has_aux(event))
		return -EOPNOTSUPP;

	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
		/*
		 * We need to start with the max_order that fits in nr_pages,
		 * not the other way around, hence ilog2() and not get_order.
		 */
		max_order = ilog2(nr_pages);

		/*
		 * The PMU requests more than one contiguous chunk of memory
		 * for SW double buffering
		 */
		if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
		    !overwrite) {
			if (!max_order)
				return -EINVAL;

			max_order--;
		}
	}

	rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
	if (!rb->aux_pages)
		return -ENOMEM;

	rb->free_aux = event->pmu->free_aux;
	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
		struct page *page;
		int last, order;

		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
		page = rb_alloc_aux_page(node, order);
		if (!page)
			goto out;

		for (last = rb->aux_nr_pages + (1 << page_private(page));
		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
	}

	/*
	 * In overwrite mode, PMUs that don't support SG may not handle more
	 * than one contiguous allocation, since they rely on PMI to do double
	 * buffering. In this case, the entire buffer has to be one contiguous
	 * chunk.
	 */
	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
	    overwrite) {
		struct page *page = virt_to_page(rb->aux_pages[0]);

		if (page_private(page) != max_order)
			goto out;
	}

	rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
					     overwrite);
	if (!rb->aux_priv)
		goto out;

	ret = 0;

	/*
	 * aux_pages (and pmu driver's private data, aux_priv) will be
	 * referenced in both producer's and consumer's contexts, thus
	 * we keep a refcount here to make sure either of the two can
	 * reference them safely.
	 */
	atomic_set(&rb->aux_refcount, 1);

	rb->aux_overwrite = overwrite;
	rb->aux_watermark = watermark;

	if (!rb->aux_watermark && !rb->aux_overwrite)
		rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);

out:
	if (!ret)
		rb->aux_pgoff = pgoff;
	else
		__rb_free_aux(rb);

	return ret;
}

void rb_free_aux(struct ring_buffer *rb)
{
	if (atomic_dec_and_test(&rb->aux_refcount))
		__rb_free_aux(rb);
}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* The '>' counts in the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' counts in the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	if (nr_pages) {
		rb->nr_pages = 1;
		rb->page_order = ilog2(nr_pages);
	}

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (rb->aux_nr_pages) {
		/* above AUX space */
		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
			return NULL;

		/* AUX space */
		if (pgoff >= rb->aux_pgoff)
			return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
	}

	return __perf_mmap_to_page(rb, pgoff);
}
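
/*
 * Illustrative layout handled above (page offsets within the perf mmap):
 * pgoff 0 is the user control page, pgoffs 1..nr_pages are the data
 * pages, and, when an AUX area exists, pgoffs from aux_pgoff onwards map
 * the AUX pages. For example, with 8 data pages and aux_pgoff == 16,
 * pgoff 18 resolves to aux_pages[2].
 */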