kernel/trace/ring_buffer.c
ring-buffer: replace most bug ons with warn on and disable buffer
1/*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6#include <linux/ring_buffer.h>
7#include <linux/spinlock.h>
8#include <linux/debugfs.h>
9#include <linux/uaccess.h>
10#include <linux/module.h>
11#include <linux/percpu.h>
12#include <linux/mutex.h>
13#include <linux/sched.h> /* used for sched_clock() (for now) */
14#include <linux/init.h>
15#include <linux/hash.h>
16#include <linux/list.h>
17#include <linux/fs.h>
18
19#include "trace.h"
20
21/* Up this if you want to test the TIME_EXTENTS and normalization */
22#define DEBUG_SHIFT 0
23
24/* FIXME!!! */
25u64 ring_buffer_time_stamp(int cpu)
26{
27 /* shift to debug/test normalization and TIME_EXTENTS */
28 return sched_clock() << DEBUG_SHIFT;
29}
30
31void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
32{
33 /* Just stupid testing the normalize function and deltas */
34 *ts >>= DEBUG_SHIFT;
35}
36
37#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
38#define RB_ALIGNMENT_SHIFT 2
39#define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT)
40#define RB_MAX_SMALL_DATA 28
41
42enum {
43 RB_LEN_TIME_EXTEND = 8,
44 RB_LEN_TIME_STAMP = 16,
45};
46
47/* inline for ring buffer fast paths */
48static inline unsigned
49rb_event_length(struct ring_buffer_event *event)
50{
51 unsigned length;
52
53 switch (event->type) {
54 case RINGBUF_TYPE_PADDING:
55 /* undefined */
56 return -1;
57
58 case RINGBUF_TYPE_TIME_EXTEND:
59 return RB_LEN_TIME_EXTEND;
60
61 case RINGBUF_TYPE_TIME_STAMP:
62 return RB_LEN_TIME_STAMP;
63
64 case RINGBUF_TYPE_DATA:
65 if (event->len)
66 length = event->len << RB_ALIGNMENT_SHIFT;
67 else
68 length = event->array[0];
69 return length + RB_EVNT_HDR_SIZE;
70 default:
71 BUG();
72 }
73 /* not hit */
74 return 0;
75}
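/*
 * Worked example of the data-event encoding handled above (a sketch,
 * assuming the 4-byte event header from linux/ring_buffer.h): a 10 byte
 * payload is reserved as 16 bytes total, stored with event->len = 3, and
 * rb_event_length() recovers (3 << RB_ALIGNMENT_SHIFT) + RB_EVNT_HDR_SIZE
 * = 16.  Payloads larger than RB_MAX_SMALL_DATA instead set event->len to
 * zero and keep the byte count in event->array[0].
 */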
76
77/**
78 * ring_buffer_event_length - return the length of the event
79 * @event: the event to get the length of
80 */
81unsigned ring_buffer_event_length(struct ring_buffer_event *event)
82{
83 return rb_event_length(event);
84}
85
86/* inline for ring buffer fast paths */
87static inline void *
88rb_event_data(struct ring_buffer_event *event)
89{
90 BUG_ON(event->type != RINGBUF_TYPE_DATA);
91 /* If length is in len field, then array[0] has the data */
92 if (event->len)
93 return (void *)&event->array[0];
94 /* Otherwise length is in array[0] and array[1] has the data */
95 return (void *)&event->array[1];
96}
97
98/**
99 * ring_buffer_event_data - return the data of the event
100 * @event: the event to get the data from
101 */
102void *ring_buffer_event_data(struct ring_buffer_event *event)
103{
104 return rb_event_data(event);
105}
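/*
 * A minimal consumer-side sketch (assumes an event returned by
 * ring_buffer_peek() or ring_buffer_consume(); struct my_record is a
 * hypothetical payload type written by the producer):
 *
 *	struct my_record *rec;
 *
 *	rec = ring_buffer_event_data(event);
 *	printk(KERN_INFO "event is %u bytes including its header\n",
 *	       ring_buffer_event_length(event));
 */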
106
107#define for_each_buffer_cpu(buffer, cpu) \
108 for_each_cpu_mask(cpu, buffer->cpumask)
109
110#define TS_SHIFT 27
111#define TS_MASK ((1ULL << TS_SHIFT) - 1)
112#define TS_DELTA_TEST (~TS_MASK)
113
114/*
115 * This hack stolen from mm/slob.c.
116 * We can store per page timing information in the page frame of the page.
117 * Thanks to Peter Zijlstra for suggesting this idea.
118 */
119struct buffer_page {
120 u64 time_stamp; /* page time stamp */
121 local_t write; /* index for next write */
122 local_t commit; /* write committed index */
123 unsigned read; /* index for next read */
124 struct list_head list; /* list of free pages */
125 void *page; /* Actual data page */
126};
127
128/*
129 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
130 * this issue out.
131 */
132static inline void free_buffer_page(struct buffer_page *bpage)
133{
134 if (bpage->page)
135 free_page((unsigned long)bpage->page);
136 kfree(bpage);
137}
138
139/*
140 * We need to fit the time_stamp delta into 27 bits.
141 */
142static inline int test_time_stamp(u64 delta)
143{
144 if (delta & TS_DELTA_TEST)
145 return 1;
146 return 0;
147}
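/*
 * Worked example of the 27-bit limit: with sched_clock() counting
 * nanoseconds, a delta of 2^27 ns is roughly 134 ms.  Two events on the
 * same CPU that are further apart than that fail test_time_stamp() and
 * force the writer to insert a RINGBUF_TYPE_TIME_EXTEND event carrying
 * the high bits of the delta in its array[0] word.
 */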
148
149#define BUF_PAGE_SIZE PAGE_SIZE
150
151/*
152 * head_page == tail_page && head == tail then buffer is empty.
153 */
154struct ring_buffer_per_cpu {
155 int cpu;
156 struct ring_buffer *buffer;
157 raw_spinlock_t lock;
158 struct lock_class_key lock_key;
159 struct list_head pages;
160 struct buffer_page *head_page; /* read from head */
161 struct buffer_page *tail_page; /* write to tail */
162 struct buffer_page *commit_page; /* committed pages */
163 struct buffer_page *reader_page;
164 unsigned long overrun;
165 unsigned long entries;
166 u64 write_stamp;
167 u64 read_stamp;
168 atomic_t record_disabled;
169};
170
171struct ring_buffer {
172 unsigned long size;
173 unsigned pages;
174 unsigned flags;
175 int cpus;
176 cpumask_t cpumask;
177 atomic_t record_disabled;
178
179 struct mutex mutex;
180
181 struct ring_buffer_per_cpu **buffers;
182};
183
184struct ring_buffer_iter {
185 struct ring_buffer_per_cpu *cpu_buffer;
186 unsigned long head;
187 struct buffer_page *head_page;
188 u64 read_stamp;
189};
190
191/* buffer may be either ring_buffer or ring_buffer_per_cpu */
192#define RB_WARN_ON(buffer, cond) \
193 do { \
194 if (unlikely(cond)) { \
195 atomic_inc(&buffer->record_disabled); \
196 WARN_ON(1); \
197 } \
198 } while (0)
199
200#define RB_WARN_ON_RET(buffer, cond) \
201 do { \
202 if (unlikely(cond)) { \
203 atomic_inc(&buffer->record_disabled); \
204 WARN_ON(1); \
205 return; \
206 } \
207 } while (0)
208
209#define RB_WARN_ON_RET_INT(buffer, cond) \
210 do { \
211 if (unlikely(cond)) { \
212 atomic_inc(&buffer->record_disabled); \
213 WARN_ON(1); \
214 return -1; \
215 } \
216 } while (0)
217
218#define RB_WARN_ON_RET_NULL(buffer, cond) \
219 do { \
220 if (unlikely(cond)) { \
221 atomic_inc(&buffer->record_disabled); \
222 WARN_ON(1); \
223 return NULL; \
224 } \
225 } while (0)
226
227#define RB_WARN_ON_ONCE(buffer, cond) \
228 do { \
229 static int once; \
230 if (unlikely(cond) && !once) { \
231 once++; \
232 atomic_inc(&buffer->record_disabled); \
233 WARN_ON(1); \
234 } \
235 } while (0)
236
237/* buffer must be ring_buffer not per_cpu */
238#define RB_WARN_ON_UNLOCK(buffer, cond) \
239 do { \
240 if (unlikely(cond)) { \
241 mutex_unlock(&buffer->mutex); \
242 atomic_inc(&buffer->record_disabled); \
243 WARN_ON(1); \
244 return -1; \
245 } \
246 } while (0)
247
248/**
249 * check_pages - integrity check of buffer pages
250 * @cpu_buffer: CPU buffer with pages to test
251 *
252 * As a safety measure we check to make sure the data pages have not
253 * been corrupted.
254 */
255static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
256{
257 struct list_head *head = &cpu_buffer->pages;
258 struct buffer_page *page, *tmp;
259
260 RB_WARN_ON_RET_INT(cpu_buffer, head->next->prev != head);
261 RB_WARN_ON_RET_INT(cpu_buffer, head->prev->next != head);
262
263 list_for_each_entry_safe(page, tmp, head, list) {
264 RB_WARN_ON_RET_INT(cpu_buffer,
265 page->list.next->prev != &page->list);
266 RB_WARN_ON_RET_INT(cpu_buffer,
267 page->list.prev->next != &page->list);
268 }
269
270 return 0;
271}
272
273static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
274 unsigned nr_pages)
275{
276 struct list_head *head = &cpu_buffer->pages;
277 struct buffer_page *page, *tmp;
278 unsigned long addr;
279 LIST_HEAD(pages);
280 unsigned i;
281
282 for (i = 0; i < nr_pages; i++) {
283 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
284 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
285 if (!page)
286 goto free_pages;
287 list_add(&page->list, &pages);
288
289 addr = __get_free_page(GFP_KERNEL);
290 if (!addr)
291 goto free_pages;
292 page->page = (void *)addr;
293 }
294
295 list_splice(&pages, head);
296
297 rb_check_pages(cpu_buffer);
298
299 return 0;
300
301 free_pages:
302 list_for_each_entry_safe(page, tmp, &pages, list) {
303 list_del_init(&page->list);
304 free_buffer_page(page);
305 }
306 return -ENOMEM;
307}
308
309static struct ring_buffer_per_cpu *
310rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
311{
312 struct ring_buffer_per_cpu *cpu_buffer;
313 struct buffer_page *page;
314 unsigned long addr;
315 int ret;
316
317 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
318 GFP_KERNEL, cpu_to_node(cpu));
319 if (!cpu_buffer)
320 return NULL;
321
322 cpu_buffer->cpu = cpu;
323 cpu_buffer->buffer = buffer;
324 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
325 INIT_LIST_HEAD(&cpu_buffer->pages);
326
327 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
328 GFP_KERNEL, cpu_to_node(cpu));
329 if (!page)
330 goto fail_free_buffer;
331
332 cpu_buffer->reader_page = page;
333 addr = __get_free_page(GFP_KERNEL);
334 if (!addr)
335 goto fail_free_reader;
336 page->page = (void *)addr;
337
338 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
339
340 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
341 if (ret < 0)
342 goto fail_free_reader;
343
344 cpu_buffer->head_page
345 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
346 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
347
348 return cpu_buffer;
349
350 fail_free_reader:
351 free_buffer_page(cpu_buffer->reader_page);
352
353 fail_free_buffer:
354 kfree(cpu_buffer);
355 return NULL;
356}
357
358static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
359{
360 struct list_head *head = &cpu_buffer->pages;
361 struct buffer_page *page, *tmp;
362
363 list_del_init(&cpu_buffer->reader_page->list);
364 free_buffer_page(cpu_buffer->reader_page);
365
366 list_for_each_entry_safe(page, tmp, head, list) {
367 list_del_init(&page->list);
368 free_buffer_page(page);
369 }
370 kfree(cpu_buffer);
371}
372
373/*
374 * Causes compile errors if the struct buffer_page gets bigger
375 * than the struct page.
376 */
377extern int ring_buffer_page_too_big(void);
378
379/**
380 * ring_buffer_alloc - allocate a new ring_buffer
381 * @size: the size in bytes that is needed.
382 * @flags: attributes to set for the ring buffer.
383 *
384 * Currently the only flag that is available is the RB_FL_OVERWRITE
385 * flag. This flag means that the buffer will overwrite old data
386 * when the buffer wraps. If this flag is not set, the buffer will
387 * drop data when the tail hits the head.
388 */
389struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
390{
391 struct ring_buffer *buffer;
392 int bsize;
393 int cpu;
394
395 /* Paranoid! Optimizes out when all is well */
396 if (sizeof(struct buffer_page) > sizeof(struct page))
397 ring_buffer_page_too_big();
398
399
400 /* keep it in its own cache line */
401 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
402 GFP_KERNEL);
403 if (!buffer)
404 return NULL;
405
406 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
407 buffer->flags = flags;
408
409 /* need at least two pages */
410 if (buffer->pages == 1)
411 buffer->pages++;
412
413 buffer->cpumask = cpu_possible_map;
414 buffer->cpus = nr_cpu_ids;
415
416 bsize = sizeof(void *) * nr_cpu_ids;
417 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
418 GFP_KERNEL);
419 if (!buffer->buffers)
420 goto fail_free_buffer;
421
422 for_each_buffer_cpu(buffer, cpu) {
423 buffer->buffers[cpu] =
424 rb_allocate_cpu_buffer(buffer, cpu);
425 if (!buffer->buffers[cpu])
426 goto fail_free_buffers;
427 }
428
429 mutex_init(&buffer->mutex);
430
431 return buffer;
432
433 fail_free_buffers:
434 for_each_buffer_cpu(buffer, cpu) {
435 if (buffer->buffers[cpu])
436 rb_free_cpu_buffer(buffer->buffers[cpu]);
437 }
438 kfree(buffer->buffers);
439
440 fail_free_buffer:
441 kfree(buffer);
442 return NULL;
443}
444
445/**
446 * ring_buffer_free - free a ring buffer.
447 * @buffer: the buffer to free.
448 */
449void
450ring_buffer_free(struct ring_buffer *buffer)
451{
452 int cpu;
453
454 for_each_buffer_cpu(buffer, cpu)
455 rb_free_cpu_buffer(buffer->buffers[cpu]);
456
457 kfree(buffer);
458}
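/*
 * A minimal allocation sketch (the size and flags are only examples):
 *
 *	struct ring_buffer *buffer;
 *
 *	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buffer)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buffer);
 */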
459
460static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
461
462static void
463rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
464{
465 struct buffer_page *page;
466 struct list_head *p;
467 unsigned i;
468
469 atomic_inc(&cpu_buffer->record_disabled);
470 synchronize_sched();
471
472 for (i = 0; i < nr_pages; i++) {
473 RB_WARN_ON_RET(cpu_buffer, list_empty(&cpu_buffer->pages));
474 p = cpu_buffer->pages.next;
475 page = list_entry(p, struct buffer_page, list);
476 list_del_init(&page->list);
477 free_buffer_page(page);
478 }
479 RB_WARN_ON_RET(cpu_buffer, list_empty(&cpu_buffer->pages));
480
481 rb_reset_cpu(cpu_buffer);
482
483 rb_check_pages(cpu_buffer);
484
485 atomic_dec(&cpu_buffer->record_disabled);
486
487}
488
489static void
490rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
491 struct list_head *pages, unsigned nr_pages)
492{
493 struct buffer_page *page;
494 struct list_head *p;
495 unsigned i;
496
497 atomic_inc(&cpu_buffer->record_disabled);
498 synchronize_sched();
499
500 for (i = 0; i < nr_pages; i++) {
501 RB_WARN_ON_RET(cpu_buffer, list_empty(pages));
502 p = pages->next;
503 page = list_entry(p, struct buffer_page, list);
504 list_del_init(&page->list);
505 list_add_tail(&page->list, &cpu_buffer->pages);
506 }
507 rb_reset_cpu(cpu_buffer);
508
509 rb_check_pages(cpu_buffer);
510
511 atomic_dec(&cpu_buffer->record_disabled);
512}
513
514/**
515 * ring_buffer_resize - resize the ring buffer
516 * @buffer: the buffer to resize.
517 * @size: the new size.
518 *
519 * The tracer is responsible for making sure that the buffer is
520 * not being used while changing the size.
521 * Note: We may be able to change the above requirement by using
522 * RCU synchronizations.
523 *
524 * Minimum size is 2 * BUF_PAGE_SIZE.
525 *
526 * Returns -1 on failure.
527 */
528int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
529{
530 struct ring_buffer_per_cpu *cpu_buffer;
531 unsigned nr_pages, rm_pages, new_pages;
532 struct buffer_page *page, *tmp;
533 unsigned long buffer_size;
534 unsigned long addr;
535 LIST_HEAD(pages);
536 int i, cpu;
537
538 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
539 size *= BUF_PAGE_SIZE;
540 buffer_size = buffer->pages * BUF_PAGE_SIZE;
541
542 /* we need a minimum of two pages */
543 if (size < BUF_PAGE_SIZE * 2)
544 size = BUF_PAGE_SIZE * 2;
545
546 if (size == buffer_size)
547 return size;
548
549 mutex_lock(&buffer->mutex);
550
551 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
552
553 if (size < buffer_size) {
554
555 /* easy case, just free pages */
556 RB_WARN_ON_UNLOCK(buffer, nr_pages >= buffer->pages);
557
558 rm_pages = buffer->pages - nr_pages;
559
560 for_each_buffer_cpu(buffer, cpu) {
561 cpu_buffer = buffer->buffers[cpu];
562 rb_remove_pages(cpu_buffer, rm_pages);
563 }
564 goto out;
565 }
566
567 /*
568 * This is a bit more difficult. We only want to add pages
569 * when we can allocate enough for all CPUs. We do this
570 * by allocating all the pages and storing them on a local
571 * link list. If we succeed in our allocation, then we
572 * add these pages to the cpu_buffers. Otherwise we just free
573 * them all and return -ENOMEM;
574 */
575 RB_WARN_ON_UNLOCK(buffer, nr_pages <= buffer->pages);
576
577 new_pages = nr_pages - buffer->pages;
578
579 for_each_buffer_cpu(buffer, cpu) {
580 for (i = 0; i < new_pages; i++) {
581 page = kzalloc_node(ALIGN(sizeof(*page),
582 cache_line_size()),
583 GFP_KERNEL, cpu_to_node(cpu));
584 if (!page)
585 goto free_pages;
586 list_add(&page->list, &pages);
587 addr = __get_free_page(GFP_KERNEL);
588 if (!addr)
589 goto free_pages;
590 page->page = (void *)addr;
591 }
592 }
593
594 for_each_buffer_cpu(buffer, cpu) {
595 cpu_buffer = buffer->buffers[cpu];
596 rb_insert_pages(cpu_buffer, &pages, new_pages);
597 }
598
599 RB_WARN_ON_UNLOCK(buffer, !list_empty(&pages));
600
601 out:
602 buffer->pages = nr_pages;
603 mutex_unlock(&buffer->mutex);
604
605 return size;
606
607 free_pages:
608 list_for_each_entry_safe(page, tmp, &pages, list) {
609 list_del_init(&page->list);
610 free_buffer_page(page);
611 }
612 return -ENOMEM;
613}
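/*
 * Resize sketch: the requested size is rounded up to whole buffer pages
 * and the resulting size (or a negative error) is returned, e.g.
 *
 *	ret = ring_buffer_resize(buffer, 512 * 1024);
 *	if (ret < 0)
 *		return ret;
 */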
614
615static inline int rb_null_event(struct ring_buffer_event *event)
616{
617 return event->type == RINGBUF_TYPE_PADDING;
618}
619
620static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
621{
622 return page->page + index;
623}
624
625static inline struct ring_buffer_event *
626rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
627{
628 return __rb_page_index(cpu_buffer->reader_page,
629 cpu_buffer->reader_page->read);
630}
631
632static inline struct ring_buffer_event *
633rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
634{
635 return __rb_page_index(cpu_buffer->head_page,
636 cpu_buffer->head_page->read);
637}
638
639static inline struct ring_buffer_event *
640rb_iter_head_event(struct ring_buffer_iter *iter)
641{
642 return __rb_page_index(iter->head_page, iter->head);
643}
644
645static inline unsigned rb_page_write(struct buffer_page *bpage)
646{
647 return local_read(&bpage->write);
648}
649
650static inline unsigned rb_page_commit(struct buffer_page *bpage)
651{
652 return local_read(&bpage->commit);
653}
654
655/* Size is determined by what has been committed */
656static inline unsigned rb_page_size(struct buffer_page *bpage)
657{
658 return rb_page_commit(bpage);
659}
660
661static inline unsigned
662rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
663{
664 return rb_page_commit(cpu_buffer->commit_page);
665}
666
667static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
668{
669 return rb_page_commit(cpu_buffer->head_page);
670}
671
672/*
673 * When the tail hits the head and the buffer is in overwrite mode,
674 * the head jumps to the next page and all content on the previous
675 * page is discarded. But before doing so, we update the overrun
676 * variable of the buffer.
677 */
678static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
679{
680 struct ring_buffer_event *event;
681 unsigned long head;
682
683 for (head = 0; head < rb_head_size(cpu_buffer);
684 head += rb_event_length(event)) {
685
686 event = __rb_page_index(cpu_buffer->head_page, head);
687 RB_WARN_ON_RET(cpu_buffer, rb_null_event(event));
688 /* Only count data entries */
689 if (event->type != RINGBUF_TYPE_DATA)
690 continue;
691 cpu_buffer->overrun++;
692 cpu_buffer->entries--;
693 }
694}
695
696static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
697 struct buffer_page **page)
698{
699 struct list_head *p = (*page)->list.next;
700
701 if (p == &cpu_buffer->pages)
702 p = p->next;
703
704 *page = list_entry(p, struct buffer_page, list);
705}
706
707static inline unsigned
708rb_event_index(struct ring_buffer_event *event)
709{
710 unsigned long addr = (unsigned long)event;
711
712 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
713}
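/*
 * Note: with BUF_PAGE_SIZE defined as PAGE_SIZE the (PAGE_SIZE -
 * BUF_PAGE_SIZE) term above is zero, so rb_event_index() is simply the
 * event's byte offset within its data page; the subtraction only matters
 * if the usable area of a buffer page ever becomes smaller than a page.
 */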
714
715static inline int
716rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
717 struct ring_buffer_event *event)
718{
719 unsigned long addr = (unsigned long)event;
720 unsigned long index;
721
722 index = rb_event_index(event);
723 addr &= PAGE_MASK;
724
725 return cpu_buffer->commit_page->page == (void *)addr &&
726 rb_commit_index(cpu_buffer) == index;
727}
728
729static inline void
730rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
731 struct ring_buffer_event *event)
732{
733 unsigned long addr = (unsigned long)event;
734 unsigned long index;
735
736 index = rb_event_index(event);
737 addr &= PAGE_MASK;
738
739 while (cpu_buffer->commit_page->page != (void *)addr) {
740 RB_WARN_ON(cpu_buffer,
741 cpu_buffer->commit_page == cpu_buffer->tail_page);
742 cpu_buffer->commit_page->commit =
743 cpu_buffer->commit_page->write;
744 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
745 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
746 }
747
748 /* Now set the commit to the event's index */
749 local_set(&cpu_buffer->commit_page->commit, index);
750}
751
752static inline void
753rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
754{
755 /*
756 * We only race with interrupts and NMIs on this CPU.
757 * If we own the commit event, then we can commit
758 * all others that interrupted us, since the interruptions
759 * are in stack format (they finish before they come
760 * back to us). This allows us to do a simple loop to
761 * assign the commit to the tail.
762 */
763 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
764 cpu_buffer->commit_page->commit =
765 cpu_buffer->commit_page->write;
766 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
767 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
768 /* add barrier to keep gcc from optimizing too much */
769 barrier();
770 }
771 while (rb_commit_index(cpu_buffer) !=
772 rb_page_write(cpu_buffer->commit_page)) {
773 cpu_buffer->commit_page->commit =
774 cpu_buffer->commit_page->write;
775 barrier();
776 }
777}
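/*
 * Example of the nesting rule above: if a writer is interrupted between
 * reserving and committing, the interrupting handler may reserve and
 * commit its own (later) event, but the commit pointer does not move
 * until the outermost writer commits; rb_set_commit_to_write() then
 * walks the commit page forward over every completed write in one pass.
 */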
778
779static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
780{
781 cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
782 cpu_buffer->reader_page->read = 0;
783}
784
785static inline void rb_inc_iter(struct ring_buffer_iter *iter)
786{
787 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
788
789 /*
790 * The iterator could be on the reader page (it starts there).
791 * But the head could have moved, since the reader was
792 * found. Check for this case and assign the iterator
793 * to the head page instead of next.
794 */
795 if (iter->head_page == cpu_buffer->reader_page)
796 iter->head_page = cpu_buffer->head_page;
797 else
798 rb_inc_page(cpu_buffer, &iter->head_page);
799
800 iter->read_stamp = iter->head_page->time_stamp;
801 iter->head = 0;
802}
803
804/**
805 * ring_buffer_update_event - update event type and data
806 * @event: the event to update
807 * @type: the type of event
808 * @length: the size of the event field in the ring buffer
809 *
810 * Update the type and data fields of the event. The length
811 * is the actual size that is written to the ring buffer,
812 * and with this, we can determine what to place into the
813 * data field.
814 */
815static inline void
816rb_update_event(struct ring_buffer_event *event,
817 unsigned type, unsigned length)
818{
819 event->type = type;
820
821 switch (type) {
822
823 case RINGBUF_TYPE_PADDING:
824 break;
825
826 case RINGBUF_TYPE_TIME_EXTEND:
827 event->len =
828 (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
829 >> RB_ALIGNMENT_SHIFT;
830 break;
831
832 case RINGBUF_TYPE_TIME_STAMP:
833 event->len =
834 (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
835 >> RB_ALIGNMENT_SHIFT;
836 break;
837
838 case RINGBUF_TYPE_DATA:
839 length -= RB_EVNT_HDR_SIZE;
840 if (length > RB_MAX_SMALL_DATA) {
841 event->len = 0;
842 event->array[0] = length;
843 } else
844 event->len =
845 (length + (RB_ALIGNMENT-1))
846 >> RB_ALIGNMENT_SHIFT;
847 break;
848 default:
849 BUG();
850 }
851}
852
853static inline unsigned rb_calculate_event_length(unsigned length)
854{
855 struct ring_buffer_event event; /* Used only for sizeof array */
856
857 /* zero length can cause confusion */
858 if (!length)
859 length = 1;
860
861 if (length > RB_MAX_SMALL_DATA)
862 length += sizeof(event.array[0]);
863
864 length += RB_EVNT_HDR_SIZE;
865 length = ALIGN(length, RB_ALIGNMENT);
866
867 return length;
868}
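/*
 * Worked examples for the sizing above: rb_calculate_event_length(0) is
 * bumped to a one byte payload and comes out as 8 (4 byte header plus 4
 * bytes of aligned data); a 100 byte request exceeds RB_MAX_SMALL_DATA,
 * gains 4 bytes of array[0] for the explicit length word, and comes out
 * as 108.
 */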
869
870static struct ring_buffer_event *
871__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
872 unsigned type, unsigned long length, u64 *ts)
873{
874 struct buffer_page *tail_page, *head_page, *reader_page;
875 unsigned long tail, write;
876 struct ring_buffer *buffer = cpu_buffer->buffer;
877 struct ring_buffer_event *event;
878 unsigned long flags;
879
880 tail_page = cpu_buffer->tail_page;
881 write = local_add_return(length, &tail_page->write);
882 tail = write - length;
883
884 /* See if we shot past the end of this buffer page */
885 if (write > BUF_PAGE_SIZE) {
886 struct buffer_page *next_page = tail_page;
887
888 local_irq_save(flags);
889 __raw_spin_lock(&cpu_buffer->lock);
890
891 rb_inc_page(cpu_buffer, &next_page);
892
893 head_page = cpu_buffer->head_page;
894 reader_page = cpu_buffer->reader_page;
895
896 /* we grabbed the lock before incrementing */
897 RB_WARN_ON(cpu_buffer, next_page == reader_page);
898
899 /*
900 * If for some reason, we had an interrupt storm that made
901 * it all the way around the buffer, bail, and warn
902 * about it.
903 */
904 if (unlikely(next_page == cpu_buffer->commit_page)) {
905 WARN_ON_ONCE(1);
906 goto out_unlock;
907 }
908
909 if (next_page == head_page) {
910 if (!(buffer->flags & RB_FL_OVERWRITE)) {
911 /* reset write */
912 if (tail <= BUF_PAGE_SIZE)
913 local_set(&tail_page->write, tail);
914 goto out_unlock;
915 }
916
917 /* tail_page has not moved yet? */
918 if (tail_page == cpu_buffer->tail_page) {
919 /* count overflows */
920 rb_update_overflow(cpu_buffer);
921
922 rb_inc_page(cpu_buffer, &head_page);
923 cpu_buffer->head_page = head_page;
924 cpu_buffer->head_page->read = 0;
925 }
926 }
927
928 /*
929 * If the tail page is still the same as what we think
930 * it is, then it is up to us to update the tail
931 * pointer.
932 */
933 if (tail_page == cpu_buffer->tail_page) {
934 local_set(&next_page->write, 0);
935 local_set(&next_page->commit, 0);
936 cpu_buffer->tail_page = next_page;
937
938 /* reread the time stamp */
939 *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
940 cpu_buffer->tail_page->time_stamp = *ts;
941 }
942
943 /*
944 * The actual tail page has moved forward.
945 */
946 if (tail < BUF_PAGE_SIZE) {
947 /* Mark the rest of the page with padding */
948 event = __rb_page_index(tail_page, tail);
949 event->type = RINGBUF_TYPE_PADDING;
950 }
951
952 if (tail <= BUF_PAGE_SIZE)
953 /* Set the write back to the previous setting */
954 local_set(&tail_page->write, tail);
955
956 /*
957 * If this was a commit entry that failed,
958 * increment that too
959 */
960 if (tail_page == cpu_buffer->commit_page &&
961 tail == rb_commit_index(cpu_buffer)) {
962 rb_set_commit_to_write(cpu_buffer);
963 }
964
965 __raw_spin_unlock(&cpu_buffer->lock);
966 local_irq_restore(flags);
967
968 /* fail and let the caller try again */
969 return ERR_PTR(-EAGAIN);
970 }
971
972 /* We reserved something on the buffer */
973
974 RB_WARN_ON_RET_NULL(cpu_buffer, write > BUF_PAGE_SIZE);
975
976 event = __rb_page_index(tail_page, tail);
977 rb_update_event(event, type, length);
978
979 /*
980 * If this is a commit and the tail is zero, then update
981 * this page's time stamp.
982 */
983 if (!tail && rb_is_commit(cpu_buffer, event))
984 cpu_buffer->commit_page->time_stamp = *ts;
985
986 return event;
987
988 out_unlock:
989 __raw_spin_unlock(&cpu_buffer->lock);
990 local_irq_restore(flags);
991 return NULL;
992}
993
994static int
995rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
996 u64 *ts, u64 *delta)
997{
998 struct ring_buffer_event *event;
999 static int once;
1000 int ret;
1001
1002 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1003 printk(KERN_WARNING "Delta way too big! %llu"
1004 " ts=%llu write stamp = %llu\n",
1005 (unsigned long long)*delta,
1006 (unsigned long long)*ts,
1007 (unsigned long long)cpu_buffer->write_stamp);
1008 WARN_ON(1);
1009 }
1010
1011 /*
1012 * The delta is too big, we need to add a
1013 * new timestamp.
1014 */
1015 event = __rb_reserve_next(cpu_buffer,
1016 RINGBUF_TYPE_TIME_EXTEND,
1017 RB_LEN_TIME_EXTEND,
1018 ts);
1019 if (!event)
1020 return -EBUSY;
1021
1022 if (PTR_ERR(event) == -EAGAIN)
1023 return -EAGAIN;
1024
1025 /* Only a committed time event can update the write stamp */
1026 if (rb_is_commit(cpu_buffer, event)) {
1027 /*
1028 * If this is the first on the page, then we need to
1029 * update the page itself, and just put in a zero.
1030 */
1031 if (rb_event_index(event)) {
1032 event->time_delta = *delta & TS_MASK;
1033 event->array[0] = *delta >> TS_SHIFT;
1034 } else {
1035 cpu_buffer->commit_page->time_stamp = *ts;
1036 event->time_delta = 0;
1037 event->array[0] = 0;
1038 }
1039 cpu_buffer->write_stamp = *ts;
1040 /* let the caller know this was the commit */
1041 ret = 1;
1042 } else {
1043 /* Darn, this is just wasted space */
1044 event->time_delta = 0;
1045 event->array[0] = 0;
1046 ret = 0;
1047 }
1048
1049 *delta = 0;
1050
1051 return ret;
1052}
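/*
 * Worked example of the split above: a delta of 0x12345678 ns (well past
 * the 27-bit limit) is stored as event->time_delta = 0x02345678 (the low
 * TS_SHIFT bits) and event->array[0] = 0x2 (the high bits); readers
 * rebuild it as (array[0] << TS_SHIFT) + time_delta.
 */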
1053
1054static struct ring_buffer_event *
1055rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1056 unsigned type, unsigned long length)
1057{
1058 struct ring_buffer_event *event;
1059 u64 ts, delta;
1060 int commit = 0;
1061 int nr_loops = 0;
1062
1063 again:
1064 /*
1065 * We allow for interrupts to reenter here and do a trace.
1066 * If one does, it will cause this original code to loop
1067 * back here. Even with heavy interrupts happening, this
1068 * should only happen a few times in a row. If this happens
1069 * 1000 times in a row, there must be either an interrupt
1070 * storm or we have something buggy.
1071 * Bail!
1072 */
1073 if (unlikely(++nr_loops > 1000)) {
1074 RB_WARN_ON(cpu_buffer, 1);
1075 return NULL;
1076 }
1077
1078 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1079
1080 /*
1081 * Only the first commit can update the timestamp.
1082 * Yes there is a race here. If an interrupt comes in
1083 * just after the conditional and it traces too, then it
1084 * will also check the deltas. More than one timestamp may
1085 * also be made. But only the entry that did the actual
1086 * commit will be something other than zero.
1087 */
1088 if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1089 rb_page_write(cpu_buffer->tail_page) ==
1090 rb_commit_index(cpu_buffer)) {
1091
1092 delta = ts - cpu_buffer->write_stamp;
1093
1094 /* make sure this delta is calculated here */
1095 barrier();
1096
1097 /* Did the write stamp get updated already? */
1098 if (unlikely(ts < cpu_buffer->write_stamp))
1099 delta = 0;
1100
1101 if (test_time_stamp(delta)) {
1102
1103 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1104
1105 if (commit == -EBUSY)
1106 return NULL;
1107
1108 if (commit == -EAGAIN)
1109 goto again;
1110
1111 RB_WARN_ON(cpu_buffer, commit < 0);
1112 }
1113 } else
1114 /* Non commits have zero deltas */
1115 delta = 0;
1116
1117 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1118 if (PTR_ERR(event) == -EAGAIN)
1119 goto again;
1120
1121 if (!event) {
1122 if (unlikely(commit))
1123 /*
1124 * Ouch! We needed a timestamp and it was committed. But
1125 * we didn't get our event reserved.
1126 */
1127 rb_set_commit_to_write(cpu_buffer);
1128 return NULL;
1129 }
1130
1131 /*
1132 * If the timestamp was committed, make the commit our entry
1133 * now so that we will update it when needed.
1134 */
1135 if (commit)
1136 rb_set_commit_event(cpu_buffer, event);
1137 else if (!rb_is_commit(cpu_buffer, event))
1138 delta = 0;
1139
1140 event->time_delta = delta;
1141
1142 return event;
1143}
1144
1145static DEFINE_PER_CPU(int, rb_need_resched);
1146
1147/**
1148 * ring_buffer_lock_reserve - reserve a part of the buffer
1149 * @buffer: the ring buffer to reserve from
1150 * @length: the length of the data to reserve (excluding event header)
1151 * @flags: a pointer to save the interrupt flags
1152 *
1153 * Returns a reserved event on the ring buffer to copy directly to.
1154 * The user of this interface will need to get the body to write into
1155 * and can use the ring_buffer_event_data() interface.
1156 *
1157 * The length is the length of the data needed, not the event length
1158 * which also includes the event header.
1159 *
1160 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1161 * If NULL is returned, then nothing has been allocated or locked.
1162 */
1163struct ring_buffer_event *
1164ring_buffer_lock_reserve(struct ring_buffer *buffer,
1165 unsigned long length,
1166 unsigned long *flags)
1167{
1168 struct ring_buffer_per_cpu *cpu_buffer;
1169 struct ring_buffer_event *event;
1170 int cpu, resched;
1171
1172 if (atomic_read(&buffer->record_disabled))
1173 return NULL;
1174
1175 /* If we are tracing schedule, we don't want to recurse */
1176 resched = ftrace_preempt_disable();
1177
1178 cpu = raw_smp_processor_id();
1179
1180 if (!cpu_isset(cpu, buffer->cpumask))
1181 goto out;
1182
1183 cpu_buffer = buffer->buffers[cpu];
1184
1185 if (atomic_read(&cpu_buffer->record_disabled))
1186 goto out;
1187
1188 length = rb_calculate_event_length(length);
1189 if (length > BUF_PAGE_SIZE)
1190 goto out;
1191
1192 event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1193 if (!event)
1194 goto out;
1195
1196 /*
1197 * Need to store resched state on this cpu.
1198 * Only the first needs to.
1199 */
1200
1201 if (preempt_count() == 1)
1202 per_cpu(rb_need_resched, cpu) = resched;
1203
1204 return event;
1205
1206 out:
1207 ftrace_preempt_enable(resched);
1208 return NULL;
1209}
1210
1211static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1212 struct ring_buffer_event *event)
1213{
1214 cpu_buffer->entries++;
1215
1216 /* Only process further if we own the commit */
1217 if (!rb_is_commit(cpu_buffer, event))
1218 return;
1219
1220 cpu_buffer->write_stamp += event->time_delta;
1221
1222 rb_set_commit_to_write(cpu_buffer);
1223}
1224
1225/**
1226 * ring_buffer_unlock_commit - commit a reserved event
1227 * @buffer: The buffer to commit to
1228 * @event: The event pointer to commit.
1229 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1230 *
1231 * This commits the data to the ring buffer, and releases any locks held.
1232 *
1233 * Must be paired with ring_buffer_lock_reserve.
1234 */
1235int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1236 struct ring_buffer_event *event,
1237 unsigned long flags)
1238{
1239 struct ring_buffer_per_cpu *cpu_buffer;
1240 int cpu = raw_smp_processor_id();
1241
1242 cpu_buffer = buffer->buffers[cpu];
1243
1244 rb_commit(cpu_buffer, event);
1245
1246 /*
1247 * Only the last preempt count needs to restore preemption.
1248 */
1249 if (preempt_count() == 1)
1250 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1251 else
1252 preempt_enable_no_resched_notrace();
1253
1254 return 0;
1255}
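/*
 * A minimal producer sketch for the reserve/commit pair above (the u32
 * payload is only an example):
 *
 *	struct ring_buffer_event *event;
 *	unsigned long irq_flags;
 *	u32 *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry), &irq_flags);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	*entry = 42;
 *	ring_buffer_unlock_commit(buffer, event, irq_flags);
 */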
1256
1257/**
1258 * ring_buffer_write - write data to the buffer without reserving
1259 * @buffer: The ring buffer to write to.
1260 * @length: The length of the data being written (excluding the event header)
1261 * @data: The data to write to the buffer.
1262 *
1263 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1264 * one function. If you already have the data to write to the buffer, it
1265 * may be easier to simply call this function.
1266 *
1267 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1268 * and not the length of the event which would hold the header.
1269 */
1270int ring_buffer_write(struct ring_buffer *buffer,
1271 unsigned long length,
1272 void *data)
1273{
1274 struct ring_buffer_per_cpu *cpu_buffer;
1275 struct ring_buffer_event *event;
1276 unsigned long event_length;
1277 void *body;
1278 int ret = -EBUSY;
1279 int cpu, resched;
1280
1281 if (atomic_read(&buffer->record_disabled))
1282 return -EBUSY;
1283
1284 resched = ftrace_preempt_disable();
1285
1286 cpu = raw_smp_processor_id();
1287
1288 if (!cpu_isset(cpu, buffer->cpumask))
1289 goto out;
1290
1291 cpu_buffer = buffer->buffers[cpu];
1292
1293 if (atomic_read(&cpu_buffer->record_disabled))
1294 goto out;
1295
1296 event_length = rb_calculate_event_length(length);
1297 event = rb_reserve_next_event(cpu_buffer,
1298 RINGBUF_TYPE_DATA, event_length);
1299 if (!event)
1300 goto out;
1301
1302 body = rb_event_data(event);
1303
1304 memcpy(body, data, length);
1305
1306 rb_commit(cpu_buffer, event);
1307
1308 ret = 0;
1309 out:
1310 ftrace_preempt_enable(resched);
1311
1312 return ret;
1313}
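/*
 * The one-shot equivalent of the sketch above, when the data already
 * exists in memory:
 *
 *	u32 value = 42;
 *
 *	if (ring_buffer_write(buffer, sizeof(value), &value))
 *		return;
 */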
1314
1315static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1316{
1317 struct buffer_page *reader = cpu_buffer->reader_page;
1318 struct buffer_page *head = cpu_buffer->head_page;
1319 struct buffer_page *commit = cpu_buffer->commit_page;
1320
1321 return reader->read == rb_page_commit(reader) &&
1322 (commit == reader ||
1323 (commit == head &&
1324 head->read == rb_page_commit(commit)));
1325}
1326
1327/**
1328 * ring_buffer_record_disable - stop all writes into the buffer
1329 * @buffer: The ring buffer to stop writes to.
1330 *
1331 * This prevents all writes to the buffer. Any attempt to write
1332 * to the buffer after this will fail and return NULL.
1333 *
1334 * The caller should call synchronize_sched() after this.
1335 */
1336void ring_buffer_record_disable(struct ring_buffer *buffer)
1337{
1338 atomic_inc(&buffer->record_disabled);
1339}
1340
1341/**
1342 * ring_buffer_record_enable - enable writes to the buffer
1343 * @buffer: The ring buffer to enable writes
1344 *
1345 * Note, multiple disables will need the same number of enables
1346 * to truly enable the writing (much like preempt_disable).
1347 */
1348void ring_buffer_record_enable(struct ring_buffer *buffer)
1349{
1350 atomic_dec(&buffer->record_disabled);
1351}
1352
1353/**
1354 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1355 * @buffer: The ring buffer to stop writes to.
1356 * @cpu: The CPU buffer to stop
1357 *
1358 * This prevents all writes to the buffer. Any attempt to write
1359 * to the buffer after this will fail and return NULL.
1360 *
1361 * The caller should call synchronize_sched() after this.
1362 */
1363void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1364{
1365 struct ring_buffer_per_cpu *cpu_buffer;
1366
1367 if (!cpu_isset(cpu, buffer->cpumask))
1368 return;
1369
1370 cpu_buffer = buffer->buffers[cpu];
1371 atomic_inc(&cpu_buffer->record_disabled);
1372}
1373
1374/**
1375 * ring_buffer_record_enable_cpu - enable writes to the buffer
1376 * @buffer: The ring buffer to enable writes
1377 * @cpu: The CPU to enable.
1378 *
1379 * Note, multiple disables will need the same number of enables
1380 * to truly enable the writing (much like preempt_disable).
1381 */
1382void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1383{
1384 struct ring_buffer_per_cpu *cpu_buffer;
1385
1386 if (!cpu_isset(cpu, buffer->cpumask))
1387 return;
1388
1389 cpu_buffer = buffer->buffers[cpu];
1390 atomic_dec(&cpu_buffer->record_disabled);
1391}
1392
1393/**
1394 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1395 * @buffer: The ring buffer
1396 * @cpu: The per CPU buffer to get the entries from.
1397 */
1398unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1399{
1400 struct ring_buffer_per_cpu *cpu_buffer;
1401
1402 if (!cpu_isset(cpu, buffer->cpumask))
1403 return 0;
1404
1405 cpu_buffer = buffer->buffers[cpu];
1406 return cpu_buffer->entries;
1407}
1408
1409/**
1410 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1411 * @buffer: The ring buffer
1412 * @cpu: The per CPU buffer to get the number of overruns from
1413 */
1414unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1415{
1416 struct ring_buffer_per_cpu *cpu_buffer;
1417
1418 if (!cpu_isset(cpu, buffer->cpumask))
1419 return 0;
1420
1421 cpu_buffer = buffer->buffers[cpu];
1422 return cpu_buffer->overrun;
1423}
1424
1425/**
1426 * ring_buffer_entries - get the number of entries in a buffer
1427 * @buffer: The ring buffer
1428 *
1429 * Returns the total number of entries in the ring buffer
1430 * (all CPU entries)
1431 */
1432unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1433{
1434 struct ring_buffer_per_cpu *cpu_buffer;
1435 unsigned long entries = 0;
1436 int cpu;
1437
1438 /* if you care about this being correct, lock the buffer */
1439 for_each_buffer_cpu(buffer, cpu) {
1440 cpu_buffer = buffer->buffers[cpu];
1441 entries += cpu_buffer->entries;
1442 }
1443
1444 return entries;
1445}
1446
1447/**
1448 * ring_buffer_overruns - get the number of overruns in the buffer
1449 * @buffer: The ring buffer
1450 *
1451 * Returns the total number of overruns in the ring buffer
1452 * (all CPU entries)
1453 */
1454unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1455{
1456 struct ring_buffer_per_cpu *cpu_buffer;
1457 unsigned long overruns = 0;
1458 int cpu;
1459
1460 /* if you care about this being correct, lock the buffer */
1461 for_each_buffer_cpu(buffer, cpu) {
1462 cpu_buffer = buffer->buffers[cpu];
1463 overruns += cpu_buffer->overrun;
1464 }
1465
1466 return overruns;
1467}
1468
1469/**
1470 * ring_buffer_iter_reset - reset an iterator
1471 * @iter: The iterator to reset
1472 *
1473 * Resets the iterator, so that it will start from the beginning
1474 * again.
1475 */
1476void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1477{
1478 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1479
1480 /* Iterator usage is expected to have record disabled */
1481 if (list_empty(&cpu_buffer->reader_page->list)) {
1482 iter->head_page = cpu_buffer->head_page;
1483 iter->head = cpu_buffer->head_page->read;
1484 } else {
1485 iter->head_page = cpu_buffer->reader_page;
1486 iter->head = cpu_buffer->reader_page->read;
1487 }
1488 if (iter->head)
1489 iter->read_stamp = cpu_buffer->read_stamp;
1490 else
1491 iter->read_stamp = iter->head_page->time_stamp;
1492}
1493
1494/**
1495 * ring_buffer_iter_empty - check if an iterator has no more to read
1496 * @iter: The iterator to check
1497 */
1498int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1499{
1500 struct ring_buffer_per_cpu *cpu_buffer;
1501
1502 cpu_buffer = iter->cpu_buffer;
1503
1504 return iter->head_page == cpu_buffer->commit_page &&
1505 iter->head == rb_commit_index(cpu_buffer);
1506}
1507
1508static void
1509rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1510 struct ring_buffer_event *event)
1511{
1512 u64 delta;
1513
1514 switch (event->type) {
1515 case RINGBUF_TYPE_PADDING:
1516 return;
1517
1518 case RINGBUF_TYPE_TIME_EXTEND:
1519 delta = event->array[0];
1520 delta <<= TS_SHIFT;
1521 delta += event->time_delta;
1522 cpu_buffer->read_stamp += delta;
1523 return;
1524
1525 case RINGBUF_TYPE_TIME_STAMP:
1526 /* FIXME: not implemented */
1527 return;
1528
1529 case RINGBUF_TYPE_DATA:
1530 cpu_buffer->read_stamp += event->time_delta;
1531 return;
1532
1533 default:
1534 BUG();
1535 }
1536 return;
1537}
1538
1539static void
1540rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1541 struct ring_buffer_event *event)
1542{
1543 u64 delta;
1544
1545 switch (event->type) {
1546 case RINGBUF_TYPE_PADDING:
1547 return;
1548
1549 case RINGBUF_TYPE_TIME_EXTEND:
1550 delta = event->array[0];
1551 delta <<= TS_SHIFT;
1552 delta += event->time_delta;
1553 iter->read_stamp += delta;
1554 return;
1555
1556 case RINGBUF_TYPE_TIME_STAMP:
1557 /* FIXME: not implemented */
1558 return;
1559
1560 case RINGBUF_TYPE_DATA:
1561 iter->read_stamp += event->time_delta;
1562 return;
1563
1564 default:
1565 BUG();
1566 }
1567 return;
1568}
1569
1570static struct buffer_page *
1571rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 1572{
1573 struct buffer_page *reader = NULL;
1574 unsigned long flags;
1575 int nr_loops = 0;
1576
1577 local_irq_save(flags);
1578 __raw_spin_lock(&cpu_buffer->lock);
1579
1580 again:
1581 /*
1582 * This should normally only loop twice. But because the
1583 * start of the reader inserts an empty page, it causes
1584 * a case where we will loop three times. There should be no
1585 * reason to loop four times (that I know of).
1586 */
1587 if (unlikely(++nr_loops > 3)) {
1588 RB_WARN_ON(cpu_buffer, 1);
1589 reader = NULL;
1590 goto out;
1591 }
1592
1593 reader = cpu_buffer->reader_page;
1594
1595 /* If there's more to read, return this page */
1596 if (cpu_buffer->reader_page->read < rb_page_size(reader))
1597 goto out;
1598
1599 /* Never should we have an index greater than the size */
1600 RB_WARN_ON(cpu_buffer,
1601 cpu_buffer->reader_page->read > rb_page_size(reader));
1602
1603 /* check if we caught up to the tail */
1604 reader = NULL;
1605 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
1606 goto out;
1607
1608 /*
1609 * Splice the empty reader page into the list around the head.
1610 * Reset the reader page to size zero.
1611 */
1612
1613 reader = cpu_buffer->head_page;
1614 cpu_buffer->reader_page->list.next = reader->list.next;
1615 cpu_buffer->reader_page->list.prev = reader->list.prev;
1616
1617 local_set(&cpu_buffer->reader_page->write, 0);
1618 local_set(&cpu_buffer->reader_page->commit, 0);
1619
1620 /* Make the reader page now replace the head */
1621 reader->list.prev->next = &cpu_buffer->reader_page->list;
1622 reader->list.next->prev = &cpu_buffer->reader_page->list;
1623
1624 /*
1625 * If the tail is on the reader, then we must set the head
1626 * to the inserted page, otherwise we set it one before.
7a8e76a3 1627 */
1628 cpu_buffer->head_page = cpu_buffer->reader_page;
1629
1630 if (cpu_buffer->commit_page != reader)
1631 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1632
1633 /* Finally update the reader page to the new head */
1634 cpu_buffer->reader_page = reader;
1635 rb_reset_reader_page(cpu_buffer);
1636
1637 goto again;
1638
1639 out:
1640 __raw_spin_unlock(&cpu_buffer->lock);
1641 local_irq_restore(flags);
1642
1643 return reader;
1644}
1645
1646static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1647{
1648 struct ring_buffer_event *event;
1649 struct buffer_page *reader;
1650 unsigned length;
1651
1652 reader = rb_get_reader_page(cpu_buffer);
1653
1654 /* This function should not be called when buffer is empty */
1655 RB_WARN_ON_RET(cpu_buffer, !reader);
1656
1657 event = rb_reader_event(cpu_buffer);
1658
1659 if (event->type == RINGBUF_TYPE_DATA)
1660 cpu_buffer->entries--;
1661
1662 rb_update_read_stamp(cpu_buffer, event);
1663
1664 length = rb_event_length(event);
1665 cpu_buffer->reader_page->read += length;
1666}
1667
1668static void rb_advance_iter(struct ring_buffer_iter *iter)
1669{
1670 struct ring_buffer *buffer;
1671 struct ring_buffer_per_cpu *cpu_buffer;
1672 struct ring_buffer_event *event;
1673 unsigned length;
1674
1675 cpu_buffer = iter->cpu_buffer;
1676 buffer = cpu_buffer->buffer;
1677
1678 /*
1679 * Check if we are at the end of the buffer.
1680 */
1681 if (iter->head >= rb_page_size(iter->head_page)) {
1682 RB_WARN_ON_RET(buffer,
1683 iter->head_page == cpu_buffer->commit_page);
1684 rb_inc_iter(iter);
1685 return;
1686 }
1687
1688 event = rb_iter_head_event(iter);
1689
1690 length = rb_event_length(event);
1691
1692 /*
1693 * This should not be called to advance the header if we are
1694 * at the tail of the buffer.
1695 */
1696 RB_WARN_ON_RET(cpu_buffer,
1697 (iter->head_page == cpu_buffer->commit_page) &&
1698 (iter->head + length > rb_commit_index(cpu_buffer)));
1699
1700 rb_update_iter_read_stamp(iter, event);
1701
1702 iter->head += length;
1703
1704 /* check for end of page padding */
1705 if ((iter->head >= rb_page_size(iter->head_page)) &&
1706 (iter->head_page != cpu_buffer->commit_page))
1707 rb_advance_iter(iter);
1708}
1709
1710/**
1711 * ring_buffer_peek - peek at the next event to be read
1712 * @buffer: The ring buffer to read
1713 * @cpu: The cpu to peek at
1714 * @ts: The timestamp counter of this event.
1715 *
1716 * This will return the event that will be read next, but does
1717 * not consume the data.
1718 */
1719struct ring_buffer_event *
1720ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1721{
1722 struct ring_buffer_per_cpu *cpu_buffer;
1723 struct ring_buffer_event *event;
1724 struct buffer_page *reader;
1725 int nr_loops = 0;
1726
1727 if (!cpu_isset(cpu, buffer->cpumask))
1728 return NULL;
1729
1730 cpu_buffer = buffer->buffers[cpu];
1731
1732 again:
1733 /*
1734 * We repeat when a timestamp is encountered. It is possible
1735 * to get multiple timestamps from an interrupt entering just
1736 * as one timestamp is about to be written. The max times
1737 * that this can happen is the number of nested interrupts we
1738 * can have. Nesting 10 deep of interrupts is clearly
1739 * an anomaly.
1740 */
1741 if (unlikely(++nr_loops > 10)) {
1742 RB_WARN_ON(cpu_buffer, 1);
1743 return NULL;
1744 }
1745
1746 reader = rb_get_reader_page(cpu_buffer);
1747 if (!reader)
1748 return NULL;
1749
1750 event = rb_reader_event(cpu_buffer);
1751
1752 switch (event->type) {
1753 case RINGBUF_TYPE_PADDING:
1754 RB_WARN_ON(cpu_buffer, 1);
1755 rb_advance_reader(cpu_buffer);
1756 return NULL;
1757
1758 case RINGBUF_TYPE_TIME_EXTEND:
1759 /* Internal data, OK to advance */
1760 rb_advance_reader(cpu_buffer);
1761 goto again;
1762
1763 case RINGBUF_TYPE_TIME_STAMP:
1764 /* FIXME: not implemented */
1765 rb_advance_reader(cpu_buffer);
1766 goto again;
1767
1768 case RINGBUF_TYPE_DATA:
1769 if (ts) {
1770 *ts = cpu_buffer->read_stamp + event->time_delta;
1771 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1772 }
1773 return event;
1774
1775 default:
1776 BUG();
1777 }
1778
1779 return NULL;
1780}
1781
1782/**
1783 * ring_buffer_iter_peek - peek at the next event to be read
1784 * @iter: The ring buffer iterator
1785 * @ts: The timestamp counter of this event.
1786 *
1787 * This will return the event that will be read next, but does
1788 * not increment the iterator.
1789 */
1790struct ring_buffer_event *
1791ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1792{
1793 struct ring_buffer *buffer;
1794 struct ring_buffer_per_cpu *cpu_buffer;
1795 struct ring_buffer_event *event;
1796 int nr_loops = 0;
1797
1798 if (ring_buffer_iter_empty(iter))
1799 return NULL;
1800
1801 cpu_buffer = iter->cpu_buffer;
1802 buffer = cpu_buffer->buffer;
1803
1804 again:
1805 /*
1806 * We repeat when a timestamp is encountered. It is possible
1807 * to get multiple timestamps from an interrupt entering just
1808 * as one timestamp is about to be written. The max times
1809 * that this can happen is the number of nested interrupts we
1810 * can have. Nesting 10 deep of interrupts is clearly
1811 * an anomaly.
1812 */
1813 if (unlikely(++nr_loops > 10)) {
1814 RB_WARN_ON(cpu_buffer, 1);
1815 return NULL;
1816 }
1817
1818 if (rb_per_cpu_empty(cpu_buffer))
1819 return NULL;
1820
1821 event = rb_iter_head_event(iter);
1822
1823 switch (event->type) {
1824 case RINGBUF_TYPE_PADDING:
1825 rb_inc_iter(iter);
1826 goto again;
1827
1828 case RINGBUF_TYPE_TIME_EXTEND:
1829 /* Internal data, OK to advance */
1830 rb_advance_iter(iter);
1831 goto again;
1832
1833 case RINGBUF_TYPE_TIME_STAMP:
1834 /* FIXME: not implemented */
1835 rb_advance_iter(iter);
1836 goto again;
1837
1838 case RINGBUF_TYPE_DATA:
1839 if (ts) {
1840 *ts = iter->read_stamp + event->time_delta;
1841 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1842 }
1843 return event;
1844
1845 default:
1846 BUG();
1847 }
1848
1849 return NULL;
1850}
1851
1852/**
1853 * ring_buffer_consume - return an event and consume it
1854 * @buffer: The ring buffer to get the next event from
1855 *
1856 * Returns the next event in the ring buffer, and that event is consumed.
1857 * Meaning, that sequential reads will keep returning a different event,
1858 * and eventually empty the ring buffer if the producer is slower.
1859 */
1860struct ring_buffer_event *
1861ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1862{
1863 struct ring_buffer_per_cpu *cpu_buffer;
1864 struct ring_buffer_event *event;
1865
1866 if (!cpu_isset(cpu, buffer->cpumask))
1867 return NULL;
1868
1869 event = ring_buffer_peek(buffer, cpu, ts);
1870 if (!event)
1871 return NULL;
1872
1873 cpu_buffer = buffer->buffers[cpu];
1874 rb_advance_reader(cpu_buffer);
1875
1876 return event;
1877}
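/*
 * A minimal consuming-read sketch draining one CPU's buffer (the ts
 * value comes back already normalized by ring_buffer_peek(); process()
 * stands in for whatever the caller does with the data):
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL)
 *		process(ring_buffer_event_data(event), ts);
 */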
1878
1879/**
1880 * ring_buffer_read_start - start a non consuming read of the buffer
1881 * @buffer: The ring buffer to read from
1882 * @cpu: The cpu buffer to iterate over
1883 *
1884 * This starts up an iteration through the buffer. It also disables
1885 * the recording to the buffer until the reading is finished.
1886 * This prevents the reading from being corrupted. This is not
1887 * a consuming read, so a producer is not expected.
1888 *
1889 * Must be paired with ring_buffer_finish.
1890 */
1891struct ring_buffer_iter *
1892ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
1893{
1894 struct ring_buffer_per_cpu *cpu_buffer;
1895 struct ring_buffer_iter *iter;
1896 unsigned long flags;
1897
1898 if (!cpu_isset(cpu, buffer->cpumask))
1899 return NULL;
1900
1901 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
1902 if (!iter)
1903 return NULL;
1904
1905 cpu_buffer = buffer->buffers[cpu];
1906
1907 iter->cpu_buffer = cpu_buffer;
1908
1909 atomic_inc(&cpu_buffer->record_disabled);
1910 synchronize_sched();
1911
1912 local_irq_save(flags);
1913 __raw_spin_lock(&cpu_buffer->lock);
1914 ring_buffer_iter_reset(iter);
1915 __raw_spin_unlock(&cpu_buffer->lock);
1916 local_irq_restore(flags);
1917
1918 return iter;
1919}
1920
1921/**
1922 * ring_buffer_finish - finish reading the iterator of the buffer
1923 * @iter: The iterator retrieved by ring_buffer_start
1924 *
1925 * This re-enables the recording to the buffer, and frees the
1926 * iterator.
1927 */
1928void
1929ring_buffer_read_finish(struct ring_buffer_iter *iter)
1930{
1931 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1932
1933 atomic_dec(&cpu_buffer->record_disabled);
1934 kfree(iter);
1935}
1936
1937/**
1938 * ring_buffer_read - read the next item in the ring buffer by the iterator
1939 * @iter: The ring buffer iterator
1940 * @ts: The time stamp of the event read.
1941 *
1942 * This reads the next event in the ring buffer and increments the iterator.
1943 */
1944struct ring_buffer_event *
1945ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
1946{
1947 struct ring_buffer_event *event;
1948
1949 event = ring_buffer_iter_peek(iter, ts);
1950 if (!event)
1951 return NULL;
1952
1953 rb_advance_iter(iter);
1954
1955 return event;
1956}
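/*
 * A minimal non-consuming read sketch using the iterator API above
 * (process() again stands in for the caller's own handling):
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)) != NULL)
 *		process(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);
 */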
1957
1958/**
1959 * ring_buffer_size - return the size of the ring buffer (in bytes)
1960 * @buffer: The ring buffer.
1961 */
1962unsigned long ring_buffer_size(struct ring_buffer *buffer)
1963{
1964 return BUF_PAGE_SIZE * buffer->pages;
1965}
1966
1967static void
1968rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
1969{
1970 cpu_buffer->head_page
1971 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
1972 local_set(&cpu_buffer->head_page->write, 0);
1973 local_set(&cpu_buffer->head_page->commit, 0);
1974
1975 cpu_buffer->head_page->read = 0;
1976
1977 cpu_buffer->tail_page = cpu_buffer->head_page;
1978 cpu_buffer->commit_page = cpu_buffer->head_page;
1979
1980 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1981 local_set(&cpu_buffer->reader_page->write, 0);
1982 local_set(&cpu_buffer->reader_page->commit, 0);
1983 cpu_buffer->reader_page->read = 0;
1984
1985 cpu_buffer->overrun = 0;
1986 cpu_buffer->entries = 0;
1987}
1988
1989/**
1990 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
1991 * @buffer: The ring buffer to reset a per cpu buffer of
1992 * @cpu: The CPU buffer to be reset
1993 */
1994void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
1995{
1996 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1997 unsigned long flags;
1998
1999 if (!cpu_isset(cpu, buffer->cpumask))
2000 return;
2001
2002 local_irq_save(flags);
2003 __raw_spin_lock(&cpu_buffer->lock);
2004
2005 rb_reset_cpu(cpu_buffer);
2006
2007 __raw_spin_unlock(&cpu_buffer->lock);
2008 local_irq_restore(flags);
2009}
2010
2011/**
2012 * ring_buffer_reset - reset a ring buffer
2013 * @buffer: The ring buffer to reset all cpu buffers
2014 */
2015void ring_buffer_reset(struct ring_buffer *buffer)
2016{
2017 int cpu;
2018
2019 for_each_buffer_cpu(buffer, cpu)
2020 ring_buffer_reset_cpu(buffer, cpu);
2021}
2022
2023/**
2024 * ring_buffer_empty - is the ring buffer empty?
2025 * @buffer: The ring buffer to test
2026 */
2027int ring_buffer_empty(struct ring_buffer *buffer)
2028{
2029 struct ring_buffer_per_cpu *cpu_buffer;
2030 int cpu;
2031
2032 /* yes this is racy, but if you don't like the race, lock the buffer */
2033 for_each_buffer_cpu(buffer, cpu) {
2034 cpu_buffer = buffer->buffers[cpu];
2035 if (!rb_per_cpu_empty(cpu_buffer))
2036 return 0;
2037 }
2038 return 1;
2039}
2040
2041/**
2042 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2043 * @buffer: The ring buffer
2044 * @cpu: The CPU buffer to test
2045 */
2046int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2047{
2048 struct ring_buffer_per_cpu *cpu_buffer;
2049
2050 if (!cpu_isset(cpu, buffer->cpumask))
2051 return 1;
2052
2053 cpu_buffer = buffer->buffers[cpu];
2054 return rb_per_cpu_empty(cpu_buffer);
2055}
2056
2057/**
2058 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2059 * @buffer_a: One buffer to swap with
2060 * @buffer_b: The other buffer to swap with
2061 *
2062 * This function is useful for tracers that want to take a "snapshot"
2063 * of a CPU buffer and have another backup buffer lying around.
2064 * It is expected that the tracer handles the cpu buffer not being
2065 * used at the moment.
2066 */
2067int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2068 struct ring_buffer *buffer_b, int cpu)
2069{
2070 struct ring_buffer_per_cpu *cpu_buffer_a;
2071 struct ring_buffer_per_cpu *cpu_buffer_b;
2072
2073 if (!cpu_isset(cpu, buffer_a->cpumask) ||
2074 !cpu_isset(cpu, buffer_b->cpumask))
2075 return -EINVAL;
2076
2077 /* At least make sure the two buffers are somewhat the same */
2078 if (buffer_a->size != buffer_b->size ||
2079 buffer_a->pages != buffer_b->pages)
2080 return -EINVAL;
2081
2082 cpu_buffer_a = buffer_a->buffers[cpu];
2083 cpu_buffer_b = buffer_b->buffers[cpu];
2084
2085 /*
2086 * We can't do a synchronize_sched here because this
2087 * function can be called in atomic context.
2088 * Normally this will be called from the same CPU as cpu.
2089 * If not it's up to the caller to protect this.
2090 */
2091 atomic_inc(&cpu_buffer_a->record_disabled);
2092 atomic_inc(&cpu_buffer_b->record_disabled);
2093
2094 buffer_a->buffers[cpu] = cpu_buffer_b;
2095 buffer_b->buffers[cpu] = cpu_buffer_a;
2096
2097 cpu_buffer_b->buffer = buffer_a;
2098 cpu_buffer_a->buffer = buffer_b;
2099
2100 atomic_dec(&cpu_buffer_a->record_disabled);
2101 atomic_dec(&cpu_buffer_b->record_disabled);
2102
2103 return 0;
2104}
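/*
 * Snapshot sketch: a tracer holding a spare buffer of the same geometry
 * can swap one CPU's live buffer out for later reading (live_buffer and
 * snapshot_buffer are example names):
 *
 *	if (ring_buffer_swap_cpu(live_buffer, snapshot_buffer, cpu))
 *		return;
 */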
2105