/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>

#include <asm/local.h>
static void update_pages_handler(struct work_struct *work);
/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_printf(s, "# compressed entry header\n");
	ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
	ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
	ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
	ret = trace_seq_printf(s, "\n");
	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return ret;
}
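/*
 * Illustrative sketch (not part of the original file): how a data event's
 * payload length maps onto the compressed header printed above, ignoring the
 * forced 8-byte alignment case. For payloads small enough to be expressed as
 * a count of 4-byte words in the 5-bit type_len field, the length is encoded
 * there; otherwise type_len is 0 and array[0] carries the length:
 *
 *	if (length <= RB_MAX_SMALL_DATA)
 *		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
 *	else {
 *		event->type_len = 0;
 *		event->array[0] = length;
 *	}
 *
 * This mirrors what rb_update_event() below does when filling in an event.
 */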
/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do what
 * ever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 * [Four ASCII diagrams showed the stages of the swap: the reader page sitting
 *  beside the ring buffer, the reader page being pointed into the ring, the
 *  ring pointing back to the reader page, and finally the old buffer page
 *  becoming the new reader page.]
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 */
/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on, makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */
/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */
enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
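/*
 * Illustrative sketch (not part of the original file): the fast-path test a
 * writer makes against this global flag before touching any ring buffer.
 * Recording proceeds only when the ON bit is set and the DISABLED bit is
 * clear, i.e. the word equals RB_BUFFERS_ON:
 *
 *	if (unlikely(ring_buffer_flags != RB_BUFFERS_ON))
 *		return NULL;
 */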
/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}
/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}
/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}
/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);
/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
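/*
 * Illustrative use of the two exported helpers above (not part of the
 * original file): given an event handed back by the ring buffer, a caller
 * sizes and reads its payload like this, where "copy" is a hypothetical
 * destination buffer:
 *
 *	void	*payload = ring_buffer_event_data(event);
 *	unsigned len	 = ring_buffer_event_length(event);
 *	memcpy(copy, payload, len);
 */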
#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)
struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};
/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)
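/*
 * Illustrative sketch (not part of the original file): with the split above,
 * the low 20 bits of ->write are the byte index on the page and the high bits
 * count in-flight updaters, so a raw value decomposes as
 *
 *	index    = local_read(&bpage->write) & RB_WRITE_MASK;
 *	updaters = local_read(&bpage->write) >> 20;
 *
 * and local_add_return(RB_WRITE_INTCNT, &bpage->write) bumps the updater
 * count without disturbing the index (see rb_tail_page_update() below).
 */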
static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)sizeof(field.time_stamp),
			       (unsigned int)is_signed_type(u64));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit),
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       1,
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE,
			       (unsigned int)is_signed_type(char));

	return ret;
}
/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	struct ring_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	unsigned int			nr_pages;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	unsigned long			read;
	unsigned long			read_bytes;
	u64				write_stamp;
	u64				read_stamp;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	int				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;
};

struct ring_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif

	u64				(*clock)(void);
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
};
/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0
static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
/*
 * Making the ring buffer lockless makes things tricky.
 * Although writes only happen on the CPU that they are on,
 * and they only need to worry about interrupts. Reads can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too.
 *
 * head->list->prev->next	bit 1	  bit 0
 *				-------	  -------
 * Points to head page		  0	    1
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * [ASCII diagram: the tail page T points to the next page N with the HEAD
 *  flag set on that pointer, while the reader page R has been spliced in
 *  and still points into the ring.]
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */
#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL

#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL
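/*
 * Illustrative sketch (not part of the original file): the flag lives in the
 * two low bits of the ->next pointer of the page before the head, so a raw
 * pointer value decomposes as
 *
 *	unsigned long	  val  = (unsigned long)list->next;
 *	struct list_head *next = (struct list_head *)(val & ~RB_FLAG_MASK);
 *	unsigned long	  flag = val & RB_FLAG_MASK;
 *
 * where flag is RB_PAGE_NORMAL, RB_PAGE_HEAD or RB_PAGE_UPDATE. This is
 * exactly what rb_list_head() and rb_is_head_page() below undo.
 */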
/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}
/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}
/*
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static int rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}
/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
				struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}
/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(cpu_buffer, head->list.prev);
}
static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}
/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}
static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}
static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}
static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}
static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss it in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(cpu_buffer, &page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}
static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}
/*
 * rb_tail_page_update - move the tail page forward
 *
 * Returns 1 if moved tail page, 0 if someone else did.
 */
static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page *tail_page,
			       struct buffer_page *next_page)
{
	struct buffer_page *old_tail;
	unsigned long old_entries;
	unsigned long old_write;
	int ret = 0;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == cpu_buffer->tail_page) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * It can only increment when a commit takes place. But that
		 * only happens in the outer most nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		old_tail = cmpxchg(&cpu_buffer->tail_page,
				   tail_page, next_page);

		if (old_tail == tail_page)
			ret = 1;
	}

	return ret;
}
static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}
/*
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
			 struct list_head *list)
{
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
		return 1;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
		return 1;
	return 0;
}
/**
 * check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	/* Reset the head page if it exists */
	if (cpu_buffer->head_page)
		rb_set_head_page(cpu_buffer);

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}
static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
{
	struct buffer_page *bpage, *tmp;
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		/*
		 * __GFP_NORETRY flag makes sure that the allocation fails
		 * gracefully without invoking oom-killer and the system is
		 * not destabilized.
		 */
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				    GFP_KERNEL | __GFP_NORETRY,
				    cpu_to_node(cpu));
		if (!bpage)
			goto free_pages;

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);
	}

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}

	return -ENOMEM;
}
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     int nr_pages)
{
	LIST_HEAD(pages);

	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}
static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_done);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}
static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	rb_head_page_deactivate(cpu_buffer);

	if (head) {
		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu);
#endif
/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					struct lock_class_key *key)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu, nr_pages;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	/* need at least two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	/*
	 * In case of non-hotplug cpu, if the ring-buffer is allocated
	 * in early initcall, it will not be notified of secondary cpus.
	 * In that off case, we need to allocate for all possible cpus.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
#endif

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&buffer->cpu_notify);
#endif

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);
void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}
static int
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
{
	struct list_head *tail_page, *to_remove, *next_page;
	struct buffer_page *to_remove_page, *tmp_iter_page;
	struct buffer_page *last_page, *first_page;
	unsigned int nr_removed;
	unsigned long head_bit;
	int page_entries;

	head_bit = 0;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	atomic_inc(&cpu_buffer->record_disabled);
	/*
	 * We don't race with the readers since we have acquired the reader
	 * lock. We also don't race with writers after disabling recording.
	 * This makes it easy to figure out the first and the last page to be
	 * removed from the list. We unlink all the pages in between including
	 * the first and last pages. This is done in a busy loop so that we
	 * lose the least number of traces.
	 * The pages are freed after we restart recording and unlock readers.
	 */
	tail_page = &cpu_buffer->tail_page->list;

	/*
	 * tail page might be on reader page, we remove the next page
	 * from the ring buffer
	 */
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		tail_page = rb_list_head(tail_page->next);
	to_remove = tail_page;

	/* start of pages to remove */
	first_page = list_entry(rb_list_head(to_remove->next),
				struct buffer_page, list);

	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
		to_remove = rb_list_head(to_remove)->next;
		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
	}

	next_page = rb_list_head(to_remove)->next;

	/*
	 * Now we remove all pages between tail_page and next_page.
	 * Make sure that we have head_bit value preserved for the
	 * next page.
	 */
	tail_page->next = (struct list_head *)((unsigned long)next_page |
						head_bit);
	next_page = rb_list_head(next_page);
	next_page->prev = tail_page;

	/* make sure pages points to a valid page in the ring buffer */
	cpu_buffer->pages = next_page;

	/* update head page */
	if (head_bit)
		cpu_buffer->head_page = list_entry(next_page,
						struct buffer_page, list);

	/*
	 * change read pointer to make sure any read iterators reset
	 * themselves
	 */
	cpu_buffer->read = 0;

	/* pages are removed, resume tracing and then free the pages */
	atomic_dec(&cpu_buffer->record_disabled);
	raw_spin_unlock_irq(&cpu_buffer->reader_lock);

	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));

	/* last buffer page to remove */
	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
				list);
	tmp_iter_page = first_page;

	do {
		to_remove_page = tmp_iter_page;
		rb_inc_page(cpu_buffer, &tmp_iter_page);

		/* update the counters */
		page_entries = rb_page_entries(to_remove_page);
		if (page_entries) {
			/*
			 * If something was added to this page, it was full
			 * since it is not the tail page. So we deduct the
			 * bytes consumed in ring buffer from here.
			 * Increment overrun to account for the lost events.
			 */
			local_add(page_entries, &cpu_buffer->overrun);
			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
		}

		/*
		 * We have already removed references to this list item, just
		 * free up the buffer_page and its page
		 */
		free_buffer_page(to_remove_page);
		nr_removed--;

	} while (to_remove_page != last_page);

	RB_WARN_ON(cpu_buffer, nr_removed);

	return nr_removed == 0;
}
static int
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *pages = &cpu_buffer->new_pages;
	int retries, success;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	/*
	 * We are holding the reader lock, so the reader page won't be swapped
	 * in the ring buffer. Now we are racing with the writer trying to
	 * move head page and the tail page.
	 * We are going to adapt the reader page update process where:
	 * 1. We first splice the start and end of list of new pages between
	 *    the head page and its previous page.
	 * 2. We cmpxchg the prev_page->next to point from head page to the
	 *    start of new pages list.
	 * 3. Finally, we update the head->prev to the end of new list.
	 *
	 * We will try this process 10 times, to make sure that we don't keep
	 * spinning.
	 */
	retries = 10;
	success = 0;
	while (retries--) {
		struct list_head *head_page, *prev_page, *r;
		struct list_head *last_page, *first_page;
		struct list_head *head_page_with_bit;

		head_page = &rb_set_head_page(cpu_buffer)->list;
		prev_page = head_page->prev;

		first_page = pages->next;
		last_page  = pages->prev;

		head_page_with_bit = (struct list_head *)
				     ((unsigned long)head_page | RB_PAGE_HEAD);

		last_page->next = head_page_with_bit;
		first_page->prev = prev_page;

		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);

		if (r == head_page_with_bit) {
			/*
			 * yay, we replaced the page pointer to our new list,
			 * now, we just have to update to head page's prev
			 * pointer to point to end of list
			 */
			head_page->prev = last_page;
			success = 1;
			break;
		}
	}

	if (success)
		INIT_LIST_HEAD(pages);
	/*
	 * If we weren't successful in adding in new pages, warn and stop
	 * tracing
	 */
	RB_WARN_ON(cpu_buffer, !success);
	raw_spin_unlock_irq(&cpu_buffer->reader_lock);

	/* free pages if they weren't inserted */
	if (!success) {
		struct buffer_page *bpage, *tmp;
		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
					 list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
	}
	return success;
}
static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	int success;

	if (cpu_buffer->nr_pages_to_update > 0)
		success = rb_insert_pages(cpu_buffer);
	else
		success = rb_remove_pages(cpu_buffer,
					-cpu_buffer->nr_pages_to_update);

	if (success)
		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
}

static void update_pages_handler(struct work_struct *work)
{
	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
			struct ring_buffer_per_cpu, update_pages_work);
	rb_update_pages(cpu_buffer);
	complete(&cpu_buffer->update_done);
}
/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns 0 on success and < 0 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
			int cpu_id)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages;
	int cpu, err = 0;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	/* Make sure the requested buffer exists */
	if (cpu_id != RING_BUFFER_ALL_CPUS &&
	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
		return size;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	/*
	 * Don't succeed if resizing is disabled, as a reader might be
	 * manipulating the ring buffer and is expecting a sane state while
	 * this is true.
	 */
	if (atomic_read(&buffer->resize_disabled))
		return -EBUSY;

	/* prevent another thread from changing buffer sizes */
	mutex_lock(&buffer->mutex);

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		/* calculate the pages to update */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];

			cpu_buffer->nr_pages_to_update = nr_pages -
							cpu_buffer->nr_pages;
			/*
			 * nothing more to do for removing pages or no update
			 */
			if (cpu_buffer->nr_pages_to_update <= 0)
				continue;
			/*
			 * to add pages, make sure all new pages can be
			 * allocated without receiving ENOMEM
			 */
			INIT_LIST_HEAD(&cpu_buffer->new_pages);
			if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
						&cpu_buffer->new_pages, cpu)) {
				/* not enough memory for new pages */
				err = -ENOMEM;
				goto out_err;
			}
		}

		/*
		 * Fire off all the required work handlers
		 * We can't schedule on offline CPUs, but it's not necessary
		 * since we can change their buffer sizes without any race.
		 */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			if (!cpu_buffer->nr_pages_to_update)
				continue;

			if (cpu_online(cpu))
				schedule_work_on(cpu,
						&cpu_buffer->update_pages_work);
			else
				rb_update_pages(cpu_buffer);
		}

		/* wait for all the updates to complete */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			if (!cpu_buffer->nr_pages_to_update)
				continue;

			if (cpu_online(cpu))
				wait_for_completion(&cpu_buffer->update_done);
			cpu_buffer->nr_pages_to_update = 0;
		}
	} else {
		/* Make sure this CPU has been initialized */
		if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
			goto out;

		cpu_buffer = buffer->buffers[cpu_id];

		if (nr_pages == cpu_buffer->nr_pages)
			goto out;

		cpu_buffer->nr_pages_to_update = nr_pages -
						cpu_buffer->nr_pages;

		INIT_LIST_HEAD(&cpu_buffer->new_pages);
		if (cpu_buffer->nr_pages_to_update > 0 &&
		    __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
					&cpu_buffer->new_pages, cpu_id)) {
			err = -ENOMEM;
			goto out_err;
		}

		if (cpu_online(cpu_id)) {
			schedule_work_on(cpu_id,
					 &cpu_buffer->update_pages_work);
			wait_for_completion(&cpu_buffer->update_done);
		} else
			rb_update_pages(cpu_buffer);

		cpu_buffer->nr_pages_to_update = 0;
	}

 out:
	/*
	 * The ring buffer resize can happen with the ring buffer
	 * enabled, so that the update disturbs the tracing as little
	 * as possible. But if the buffer is disabled, we do not need
	 * to worry about that, and we can take the time to verify
	 * that the buffer is not corrupt.
	 */
	if (atomic_read(&buffer->record_disabled)) {
		atomic_inc(&buffer->record_disabled);
		/*
		 * Even though the buffer was disabled, we must make sure
		 * that it is truly disabled before calling rb_check_pages.
		 * There could have been a race between checking
		 * record_disable and incrementing it.
		 */
		synchronize_sched();
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_check_pages(cpu_buffer);
		}
		atomic_dec(&buffer->record_disabled);
	}

	mutex_unlock(&buffer->mutex);
	return size;

 out_err:
	for_each_buffer_cpu(buffer, cpu) {
		struct buffer_page *bpage, *tmp;

		cpu_buffer = buffer->buffers[cpu];
		cpu_buffer->nr_pages_to_update = 0;

		if (list_empty(&cpu_buffer->new_pages))
			continue;

		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
					list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
	}
	mutex_unlock(&buffer->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
{
	mutex_lock(&buffer->mutex);
	if (val)
		buffer->flags |= RB_FL_OVERWRITE;
	else
		buffer->flags &= ~RB_FL_OVERWRITE;
	mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}
/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
}
static inline int
rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
		   struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}
static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long max_count;

	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	max_count = cpu_buffer->nr_pages * 100;

	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
			return;
		if (RB_WARN_ON(cpu_buffer,
			       rb_is_reader_page(cpu_buffer->tail_page)))
			return;
		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {

		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		RB_WARN_ON(cpu_buffer,
			   local_read(&cpu_buffer->commit_page->page->commit) &
			   ~RB_WRITE_MASK);
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
		goto again;
}
static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}
static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = rb_set_head_page(cpu_buffer);
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}
/* Slow path, do not inline */
static noinline struct ring_buffer_event *
rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
{
	event->type_len = RINGBUF_TYPE_TIME_EXTEND;

	/* Not the first event on the page? */
	if (rb_event_index(event)) {
		event->time_delta = delta & TS_MASK;
		event->array[0] = delta >> TS_SHIFT;
	} else {
		/* nope, just zero it */
		event->time_delta = 0;
		event->array[0] = 0;
	}

	return skip_time_extend(event);
}
/**
 * rb_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
		struct ring_buffer_event *event, unsigned length,
		int add_timestamp, u64 delta)
{
	/* Only a commit updates the timestamp */
	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
		delta = 0;

	/*
	 * If we need to add a timestamp, then we
	 * add it to the start of the reserved space.
	 */
	if (unlikely(add_timestamp)) {
		event = rb_add_time_stamp(event, delta);
		length -= RB_LEN_TIME_EXTEND;
		delta = 0;
	}

	event->time_delta = delta;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
		event->type_len = 0;
		event->array[0] = length;
	} else
		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
}
/*
 * rb_handle_head_page - writer hit the head page
 *
 * Returns: +1 to retry page
 *           0 to continue
 *          -1 on error
 */
static int
rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		    struct buffer_page *tail_page,
		    struct buffer_page *next_page)
{
	struct buffer_page *new_head;
	int entries;
	int type;
	int ret;

	entries = rb_page_entries(next_page);

	/*
	 * The hard part is here. We need to move the head
	 * forward, and protect against both readers on
	 * other CPUs and writers coming in via interrupts.
	 */
	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
				       RB_PAGE_HEAD);

	/*
	 * type can be one of four:
	 *  NORMAL - an interrupt already moved it for us
	 *  HEAD   - we are the first to get here.
	 *  UPDATE - we are the interrupt interrupting
	 *           a current move.
	 *  MOVED  - a reader on another CPU moved the next
	 *           pointer to its reader page. Give up
	 *           and try again.
	 */
	switch (type) {
	case RB_PAGE_HEAD:
		/*
		 * We changed the head to UPDATE, thus
		 * it is our responsibility to update
		 * the counters.
		 */
		local_add(entries, &cpu_buffer->overrun);
		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);

		/*
		 * The entries will be zeroed out when we move the
		 * tail page.
		 */

		/* still more to do */
		break;

	case RB_PAGE_UPDATE:
		/*
		 * This is an interrupt that interrupted the
		 * previous update. Still more to do.
		 */
		break;
	case RB_PAGE_NORMAL:
		/*
		 * An interrupt came in before the update
		 * and processed this for us.
		 * Nothing left to do.
		 */
		return 1;
	case RB_PAGE_MOVED:
		/*
		 * The reader is on another CPU and just did
		 * a swap with our next_page.
		 * Try again.
		 */
		return 1;
	default:
		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
		return -1;
	}

	/*
	 * Now that we are here, the old head pointer is
	 * set to UPDATE. This will keep the reader from
	 * swapping the head page with the reader page.
	 * The reader (on another CPU) will spin till
	 * we are finished.
	 *
	 * We just need to protect against interrupts
	 * doing the job. We will set the next pointer
	 * to HEAD. After that, we set the old pointer
	 * to NORMAL, but only if it was HEAD before.
	 * otherwise we are an interrupt, and only
	 * want the outer most commit to reset it.
	 */
	new_head = next_page;
	rb_inc_page(cpu_buffer, &new_head);

	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
				    RB_PAGE_NORMAL);

	/*
	 * Valid returns are:
	 *  HEAD   - an interrupt came in and already set it.
	 *  NORMAL - One of two things:
	 *            1) We really set it.
	 *            2) A bunch of interrupts came in and moved
	 *               the page forward again.
	 */
	switch (ret) {
	case RB_PAGE_HEAD:
	case RB_PAGE_NORMAL:
		break;
	default:
		RB_WARN_ON(cpu_buffer, 1);
		return -1;
	}

	/*
	 * It is possible that an interrupt came in,
	 * set the head up, then more interrupts came in
	 * and moved it again. When we get back here,
	 * the page would have been set to NORMAL but we
	 * just set it back to HEAD.
	 *
	 * How do you detect this? Well, if that happened
	 * the tail page would have moved.
	 */
	if (ret == RB_PAGE_NORMAL) {
		/*
		 * If the tail had moved past next, then we need
		 * to reset the pointer.
		 */
		if (cpu_buffer->tail_page != tail_page &&
		    cpu_buffer->tail_page != next_page)
			rb_head_page_set_normal(cpu_buffer, new_head,
						next_page,
						RB_PAGE_HEAD);
	}

	/*
	 * If this was the outer most commit (the one that
	 * changed the original pointer from HEAD to UPDATE),
	 * then it is up to us to reset it to NORMAL.
	 */
	if (type == RB_PAGE_HEAD) {
		ret = rb_head_page_set_normal(cpu_buffer, next_page,
					      tail_page,
					      RB_PAGE_UPDATE);
		if (RB_WARN_ON(cpu_buffer,
			       ret != RB_PAGE_UPDATE))
			return -1;
	}

	return 0;
}
static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusions */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ARCH_ALIGNMENT);

	return length;
}
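/*
 * Worked example (illustrative, not part of the original file), assuming
 * RB_ALIGNMENT == 4 and no forced 8-byte alignment: a 12-byte payload needs
 * 12 + RB_EVNT_HDR_SIZE(4) = 16 bytes, ALIGN(16, 4) = 16 bytes reserved, and
 * rb_update_event() encodes it as type_len = DIV_ROUND_UP(12, 4) = 3.
 * A 200-byte payload exceeds RB_MAX_SMALL_DATA (112), so array[0] carries the
 * length instead: 200 + 4 + 4 = 208 bytes reserved with type_len = 0.
 */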
static void
rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
	      struct buffer_page *tail_page,
	      unsigned long tail, unsigned long length)
{
	struct ring_buffer_event *event;

	/*
	 * Only the event that crossed the page boundary
	 * must fill the old tail_page with padding.
	 */
	if (tail >= BUF_PAGE_SIZE) {
		/*
		 * If the page was filled, then we still need
		 * to update the real_end. Reset it to zero
		 * and the reader will ignore it.
		 */
		if (tail == BUF_PAGE_SIZE)
			tail_page->real_end = 0;

		local_sub(length, &tail_page->write);
		return;
	}

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);

	/* account for padding bytes */
	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);

	/*
	 * Save the original length to the meta data.
	 * This will be used by the reader to add lost event
	 * counter.
	 */
	tail_page->real_end = tail;

	/*
	 * If this event is bigger than the minimum size, then
	 * we need to be careful that we don't subtract the
	 * write counter enough to allow another writer to slip
	 * in on this page.
	 * We put in a discarded commit instead, to make sure
	 * that this space is not used again.
	 *
	 * If we are less than the minimum size, we don't need to
	 * worry about it.
	 */
	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
		/* No room for any events */

		/* Mark the rest of the page with padding */
		rb_event_set_padding(event);

		/* Set the write back to the previous setting */
		local_sub(length, &tail_page->write);
		return;
	}

	/* Put in a discarded event */
	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	event->time_delta = 1;

	/* Set write to end of buffer */
	length = (tail + length) - BUF_PAGE_SIZE;
	local_sub(length, &tail_page->write);
}
/*
 * This is the slow path, force gcc not to inline it.
 */
static noinline struct ring_buffer_event *
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
	     unsigned long length, unsigned long tail,
	     struct buffer_page *tail_page, u64 ts)
{
	struct buffer_page *commit_page = cpu_buffer->commit_page;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct buffer_page *next_page;
	int ret;

	next_page = tail_page;

	rb_inc_page(cpu_buffer, &next_page);

	/*
	 * If for some reason, we had an interrupt storm that made
	 * it all the way around the buffer, bail, and warn
	 * about it.
	 */
	if (unlikely(next_page == commit_page)) {
		local_inc(&cpu_buffer->commit_overrun);
		goto out_reset;
	}

	/*
	 * This is where the fun begins!
	 *
	 * We are fighting against races between a reader that
	 * could be on another CPU trying to swap its reader
	 * page with the buffer head.
	 *
	 * We are also fighting against interrupts coming in and
	 * moving the head or tail on us as well.
	 *
	 * If the next page is the head page then we have filled
	 * the buffer, unless the commit page is still on the
	 * reader page.
	 */
	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {

		/*
		 * If the commit is not on the reader page, then
		 * move the header page.
		 */
		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
			/*
			 * If we are not in overwrite mode,
			 * this is easy, just stop here.
			 */
			if (!(buffer->flags & RB_FL_OVERWRITE)) {
				local_inc(&cpu_buffer->dropped_events);
				goto out_reset;
			}

			ret = rb_handle_head_page(cpu_buffer,
						  tail_page,
						  next_page);
			if (ret < 0)
				goto out_reset;
			if (ret)
				goto out_again;
		} else {
			/*
			 * We need to be careful here too. The
			 * commit page could still be on the reader
			 * page. We could have a small buffer, and
			 * have filled up the buffer with events
			 * from interrupts and such, and wrapped.
			 *
			 * Note, if the tail page is also on the
			 * reader_page, we let it move out.
			 */
			if (unlikely((cpu_buffer->commit_page !=
				      cpu_buffer->tail_page) &&
				     (cpu_buffer->commit_page ==
				      cpu_buffer->reader_page))) {
				local_inc(&cpu_buffer->commit_overrun);
				goto out_reset;
			}
		}
	}

	ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
	if (ret) {
		/*
		 * Nested commits always have zero deltas, so
		 * just reread the time stamp
		 */
		ts = rb_time_stamp(buffer);
		next_page->page->time_stamp = ts;
	}

 out_again:

	rb_reset_tail(cpu_buffer, tail_page, tail, length);

	/* fail and let the caller try again */
	return ERR_PTR(-EAGAIN);

 out_reset:
	/* reset write */
	rb_reset_tail(cpu_buffer, tail_page, tail, length);

	return NULL;
}
static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned long length, u64 ts,
		  u64 delta, int add_timestamp)
{
	struct buffer_page *tail_page;
	struct ring_buffer_event *event;
	unsigned long tail, write;

	/*
	 * If the time delta since the last event is too big to
	 * hold in the time field of the event, then we append a
	 * TIME EXTEND event ahead of the data event.
	 */
	if (unlikely(add_timestamp))
		length += RB_LEN_TIME_EXTEND;

	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);

	/* set write to only the index of the write */
	write &= RB_WRITE_MASK;
	tail = write - length;

	/* See if we shot past the end of this buffer page */
	if (unlikely(write > BUF_PAGE_SIZE))
		return rb_move_tail(cpu_buffer, length, tail,
				    tail_page, ts);

	/* We reserved something on the buffer */

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);
	rb_update_event(cpu_buffer, event, length, add_timestamp, delta);

	local_inc(&tail_page->entries);

	/*
	 * If this is the first commit on the page, then update
	 * its timestamp.
	 */
	if (!tail)
		tail_page->page->time_stamp = ts;

	/* account for these added bytes */
	local_add(length, &cpu_buffer->entries_bytes);

	return event;
}
static inline int
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
		  struct ring_buffer_event *event)
{
	unsigned long new_index, old_index;
	struct buffer_page *bpage;
	unsigned long index;
	unsigned long addr;

	new_index = rb_event_index(event);
	old_index = new_index + rb_event_ts_length(event);
	addr = (unsigned long)event;
	addr &= PAGE_MASK;

	bpage = cpu_buffer->tail_page;

	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
		unsigned long write_mask =
			local_read(&bpage->write) & ~RB_WRITE_MASK;
		unsigned long event_length = rb_event_length(event);
		/*
		 * This is on the tail page. It is possible that
		 * a write could come in and move the tail page
		 * and write to the next page. That is fine
		 * because we just shorten what is on this page.
		 */
		old_index += write_mask;
		new_index += write_mask;
		index = local_cmpxchg(&bpage->write, old_index, new_index);
		if (index == old_index) {
			/* update counters */
			local_sub(event_length, &cpu_buffer->entries_bytes);
			return 1;
		}
	}

	/* could not discard */
	return 0;
}
2310 static void rb_start_commit(struct ring_buffer_per_cpu
*cpu_buffer
)
2312 local_inc(&cpu_buffer
->committing
);
2313 local_inc(&cpu_buffer
->commits
);
static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long commits;

	if (RB_WARN_ON(cpu_buffer,
		       !local_read(&cpu_buffer->committing)))
		return;

 again:
	commits = local_read(&cpu_buffer->commits);
	/* synchronize with interrupts */
	barrier();
	if (local_read(&cpu_buffer->committing) == 1)
		rb_set_commit_to_write(cpu_buffer);

	local_dec(&cpu_buffer->committing);

	/* synchronize with interrupts */
	barrier();

	/*
	 * Need to account for interrupts coming in between the
	 * updating of the commit page and the clearing of the
	 * committing counter.
	 */
	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
	    !local_read(&cpu_buffer->committing)) {
		local_inc(&cpu_buffer->committing);
		goto again;
	}
}
static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer *buffer,
		      struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta;
	int nr_loops = 0;
	int add_timestamp;
	u64 diff;

	rb_start_commit(cpu_buffer);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	/*
	 * Due to the ability to swap a cpu buffer from a buffer
	 * it is possible it was swapped before we committed.
	 * (committing stops a swap). We check for it here and
	 * if it happened, we have to fail the write.
	 */
	barrier();
	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
		local_dec(&cpu_buffer->committing);
		local_dec(&cpu_buffer->commits);
		return NULL;
	}
#endif

	length = rb_calculate_event_length(length);
 again:
	add_timestamp = 0;
	delta = 0;

	/*
	 * We allow for interrupts to reenter here and do a trace.
	 * If one does, it will cause this original code to loop
	 * back here. Even with heavy interrupts happening, this
	 * should only happen a few times in a row. If this happens
	 * 1000 times in a row, there must be either an interrupt
	 * storm or we have something buggy.
	 * Bail!
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
		goto out_fail;

	ts = rb_time_stamp(cpu_buffer->buffer);
	diff = ts - cpu_buffer->write_stamp;

	/* make sure this diff is calculated here */
	barrier();

	/* Did the write stamp get updated already? */
	if (likely(ts >= cpu_buffer->write_stamp)) {
		delta = diff;
		if (unlikely(test_time_stamp(delta))) {
			int local_clock_stable = 1;
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
			local_clock_stable = sched_clock_stable;
#endif
			WARN_ONCE(delta > (1ULL << 59),
				  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
				  (unsigned long long)delta,
				  (unsigned long long)ts,
				  (unsigned long long)cpu_buffer->write_stamp,
				  local_clock_stable ? "" :
				  "If you just came from a suspend/resume,\n"
				  "please switch to the trace global clock:\n"
				  "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
			add_timestamp = 1;
		}
	}

	event = __rb_reserve_next(cpu_buffer, length, ts,
				  delta, add_timestamp);
	if (unlikely(PTR_ERR(event) == -EAGAIN))
		goto again;

	if (!event)
		goto out_fail;

	return event;

 out_fail:
	rb_end_commit(cpu_buffer);
	return NULL;
}
#ifdef CONFIG_TRACING

#define TRACE_RECURSIVE_DEPTH 16

/* Keep this code out of the fast path cache */
static noinline void trace_recursive_fail(void)
{
	/* Disable all tracing before we do anything else */
	tracing_off_permanent();

	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
		    trace_recursion_buffer(),
		    hardirq_count() >> HARDIRQ_SHIFT,
		    softirq_count() >> SOFTIRQ_SHIFT,
		    in_nmi());

	WARN_ON_ONCE(1);
}

static inline int trace_recursive_lock(void)
{
	trace_recursion_inc();

	if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
		return 0;

	trace_recursive_fail();

	return -1;
}

static inline void trace_recursive_unlock(void)
{
	WARN_ON_ONCE(!trace_recursion_buffer());

	trace_recursion_dec();
}

#else

#define trace_recursive_lock()		(0)
#define trace_recursive_unlock()	do { } while (0)

#endif
/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	preempt_disable_notrace();

	if (atomic_read(&buffer->record_disabled))
		goto out_nocheck;

	if (trace_recursive_lock())
		goto out_nocheck;

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	event = rb_reserve_next_event(buffer, cpu_buffer, length);
	if (!event)
		goto out;

	return event;

 out:
	trace_recursive_unlock();

 out_nocheck:
	preempt_enable_notrace();
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
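
/*
 * Example usage (illustrative sketch, not part of the original file): a
 * writer reserves space, fills in its payload through
 * ring_buffer_event_data(), and then commits.  The "struct my_entry"
 * payload type and the "my_buffer" pointer are hypothetical names used
 * only for this sketch.
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(my_buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(my_buffer, event);
 */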
static void
rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	u64 delta;

	/*
	 * The event first in the commit queue updates the
	 * write time stamp.
	 */
	if (rb_event_is_commit(cpu_buffer, event)) {
		/*
		 * A commit event that is first on a page
		 * updates the write timestamp with the page stamp
		 */
		if (!rb_event_index(event))
			cpu_buffer->write_stamp =
				cpu_buffer->commit_page->page->time_stamp;
		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
			delta = event->array[0];
			delta <<= TS_SHIFT;
			delta += event->time_delta;
			cpu_buffer->write_stamp += delta;
		} else
			cpu_buffer->write_stamp += event->time_delta;
	}
}

static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	local_inc(&cpu_buffer->entries);
	rb_update_write_stamp(cpu_buffer, event);
	rb_end_commit(cpu_buffer);
}

/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	trace_recursive_unlock();

	preempt_enable_notrace();

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
static inline void rb_event_discard(struct ring_buffer_event *event)
{
	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);

	/* array[0] holds the actual length for the discarded event */
	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	if (!event->time_delta)
		event->time_delta = 1;
}

/*
 * Decrement the entries to the page that an event is on.
 * The event does not even need to exist, only the pointer
 * to the page it is on. This may only be called before the commit
 * takes place.
 */
static inline void
rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
		   struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	struct buffer_page *bpage = cpu_buffer->commit_page;
	struct buffer_page *start;

	addr &= PAGE_MASK;

	/* Do the likely case first */
	if (likely(bpage->page == (void *)addr)) {
		local_dec(&bpage->entries);
		return;
	}

	/*
	 * Because the commit page may be on the reader page we
	 * start with the next page and check the end loop there.
	 */
	rb_inc_page(cpu_buffer, &bpage);
	start = bpage;
	do {
		if (bpage->page == (void *)addr) {
			local_dec(&bpage->entries);
			return;
		}
		rb_inc_page(cpu_buffer, &bpage);
	} while (bpage != start);

	/* commit not part of this buffer?? */
	RB_WARN_ON(cpu_buffer, 1);
}
/**
 * ring_buffer_discard_commit - discard an event that has not been committed
 * @buffer: the ring buffer
 * @event: non committed event to discard
 *
 * Sometimes an event that is in the ring buffer needs to be ignored.
 * This function lets the user discard an event in the ring buffer
 * and then that event will not be read later.
 *
 * This function only works if it is called before the item has been
 * committed. It will try to free the event from the ring buffer
 * if another event has not been added behind it.
 *
 * If another event has been added behind it, it will set the event
 * up as discarded, and perform the commit.
 *
 * If this function is called, do not call ring_buffer_unlock_commit on
 * the event.
 */
void ring_buffer_discard_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* The event is discarded regardless */
	rb_event_discard(event);

	cpu = smp_processor_id();
	cpu_buffer = buffer->buffers[cpu];

	/*
	 * This must only be called if the event has not been
	 * committed yet. Thus we can assume that preemption
	 * is still disabled.
	 */
	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));

	rb_decrement_entry(cpu_buffer, event);
	if (rb_try_to_discard(cpu_buffer, event))
		goto out;

	/*
	 * The commit is still visible by the reader, so we
	 * must still update the timestamp.
	 */
	rb_update_write_stamp(cpu_buffer, event);
 out:
	rb_end_commit(cpu_buffer);

	trace_recursive_unlock();

	preempt_enable_notrace();
}
EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
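
/*
 * Example (illustrative sketch, not part of the original file): a writer
 * that reserves an event but decides not to keep it calls
 * ring_buffer_discard_commit() instead of ring_buffer_unlock_commit().
 * The names "my_buffer", "read_sensor" and "filter_out" are hypothetical.
 *
 *	event = ring_buffer_lock_reserve(my_buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = read_sensor();
 *	if (filter_out(entry))
 *		ring_buffer_discard_commit(my_buffer, event);
 *	else
 *		ring_buffer_unlock_commit(my_buffer, event);
 */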
/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length,
		      void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	void *body;
	int ret = -EBUSY;
	int cpu;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return -EBUSY;

	preempt_disable_notrace();

	if (atomic_read(&buffer->record_disabled))
		goto out;

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	event = rb_reserve_next_event(buffer, cpu_buffer, length);
	if (!event)
		goto out;

	body = rb_event_data(event);

	memcpy(body, data, length);

	rb_commit(cpu_buffer, event);

	ret = 0;
 out:
	preempt_enable_notrace();

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_write);
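
/*
 * Example (illustrative sketch, not part of the original file): when the
 * payload already exists in memory, ring_buffer_write() replaces the
 * reserve/commit pair.  "my_buffer" and "struct my_sample" are
 * hypothetical names used only for this sketch.
 *
 *	struct my_sample sample = { .value = 42 };
 *
 *	if (ring_buffer_write(my_buffer, sizeof(sample), &sample))
 *		pr_debug("ring buffer write failed\n");
 */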
static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = rb_set_head_page(cpu_buffer);
	struct buffer_page *commit = cpu_buffer->commit_page;

	/* In case of error, head will be NULL */
	if (unlikely(!head))
		return 1;

	return reader->read == rb_page_commit(reader) &&
		(commit == reader ||
		 (commit == head &&
		  head->read == rb_page_commit(commit)));
}
/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable);

/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable);

/**
 * ring_buffer_record_off - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * This is different than ring_buffer_record_disable() as
 * it works like an on/off switch, whereas the disable() version
 * must be paired with an enable().
 */
void ring_buffer_record_off(struct ring_buffer *buffer)
{
	unsigned int rd;
	unsigned int new_rd;

	do {
		rd = atomic_read(&buffer->record_disabled);
		new_rd = rd | RB_BUFFER_OFF;
	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_off);

/**
 * ring_buffer_record_on - restart writes into the buffer
 * @buffer: The ring buffer to start writes to.
 *
 * This enables all writes to the buffer that was disabled by
 * ring_buffer_record_off().
 *
 * This is different than ring_buffer_record_enable() as
 * it works like an on/off switch, whereas the enable() version
 * must be paired with a disable().
 */
void ring_buffer_record_on(struct ring_buffer *buffer)
{
	unsigned int rd;
	unsigned int new_rd;

	do {
		rd = atomic_read(&buffer->record_disabled);
		new_rd = rd & ~RB_BUFFER_OFF;
	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_on);
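
/*
 * Example (illustrative sketch, not part of the original file): unlike the
 * nesting disable/enable pair, record_off/record_on act as a simple
 * switch, so repeated calls do not need to be balanced.  "my_buffer" is
 * a hypothetical name.
 *
 *	ring_buffer_record_off(my_buffer);	// writes now fail
 *	ring_buffer_record_off(my_buffer);	// still off, no nesting
 *	ring_buffer_record_on(my_buffer);	// writes allowed again,
 *						// provided no disable() is
 *						// outstanding
 */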
/**
 * ring_buffer_record_is_on - return true if the ring buffer can write
 * @buffer: The ring buffer to see if write is enabled
 *
 * Returns true if the ring buffer is in a state that it accepts writes.
 */
int ring_buffer_record_is_on(struct ring_buffer *buffer)
{
	return !atomic_read(&buffer->record_disabled);
}

/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);

/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
/*
 * The total number of entries in the ring buffer is the running counter
 * of entries entered into the ring buffer, minus the sum of
 * the entries read from the ring buffer and the number of
 * entries that were overwritten.
 */
static inline unsigned long
rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
{
	return local_read(&cpu_buffer->entries) -
		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
}

/**
 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to read from.
 */
u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
{
	unsigned long flags;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	u64 ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	/*
	 * if the tail is on reader_page, oldest time stamp is on the reader
	 * page
	 */
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		bpage = cpu_buffer->reader_page;
	else
		bpage = rb_set_head_page(cpu_buffer);
	ret = bpage->page->time_stamp;
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);

/**
 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to read from.
 */
unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
/**
 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the entries from.
 */
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];

	return rb_num_of_entries(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);

/**
 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->overrun);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);

/**
 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
 * commits failing due to the buffer wrapping around while there are uncommitted
 * events, such as during an interrupt storm.
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long
ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->commit_overrun);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);

/**
 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of dropped events from
 */
unsigned long
ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->dropped_events);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of entries in the ring buffer
 */
unsigned long ring_buffer_entries(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long entries = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		entries += rb_num_of_entries(cpu_buffer);
	}

	return entries;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries);

/**
 * ring_buffer_overruns - get the number of overruns in buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
 */
unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long overruns = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		overruns += local_read(&cpu_buffer->overrun);
	}

	return overruns;
}
EXPORT_SYMBOL_GPL(ring_buffer_overruns);
static void rb_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/* Iterator usage is expected to have record disabled */
	if (list_empty(&cpu_buffer->reader_page->list)) {
		iter->head_page = rb_set_head_page(cpu_buffer);
		if (unlikely(!iter->head_page))
			return;
		iter->head = iter->head_page->read;
	} else {
		iter->head_page = cpu_buffer->reader_page;
		iter->head = cpu_buffer->reader_page->read;
	}
	if (iter->head)
		iter->read_stamp = cpu_buffer->read_stamp;
	else
		iter->read_stamp = iter->head_page->page->time_stamp;
	iter->cache_reader_page = cpu_buffer->reader_page;
	iter->cache_read = cpu_buffer->read;
}

/**
 * ring_buffer_iter_reset - reset an iterator
 * @iter: The iterator to reset
 *
 * Resets the iterator, so that it will start from the beginning
 * again.
 */
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	rb_iter_reset(iter);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);

/**
 * ring_buffer_iter_empty - check if an iterator has no more to read
 * @iter: The iterator to check
 */
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	cpu_buffer = iter->cpu_buffer;

	return iter->head_page == cpu_buffer->commit_page &&
		iter->head == rb_commit_index(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
static void
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		     struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		cpu_buffer->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		cpu_buffer->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
			  struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		iter->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		iter->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}
static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = NULL;
	unsigned long overwrite;
	unsigned long flags;
	int nr_loops = 0;
	int ret;

	local_irq_save(flags);
	arch_spin_lock(&cpu_buffer->lock);

 again:
	/*
	 * This should normally only loop twice. But because the
	 * start of the reader inserts an empty page, it causes
	 * a case where we will loop three times. There should be no
	 * reason to loop four times (that I know of).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
		reader = NULL;
		goto out;
	}

	reader = cpu_buffer->reader_page;

	/* If there's more to read, return this page */
	if (cpu_buffer->reader_page->read < rb_page_size(reader))
		goto out;

	/* Never should we have an index greater than the size */
	if (RB_WARN_ON(cpu_buffer,
		       cpu_buffer->reader_page->read > rb_page_size(reader)))
		goto out;

	/* check if we caught up to the tail */
	reader = NULL;
	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
		goto out;

	/* Don't bother swapping if the ring buffer is empty */
	if (rb_num_of_entries(cpu_buffer) == 0)
		goto out;

	/*
	 * Reset the reader page to size zero.
	 */
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->real_end = 0;

 spin:
	/*
	 * Splice the empty reader page into the list around the head.
	 */
	reader = rb_set_head_page(cpu_buffer);
	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
	cpu_buffer->reader_page->list.prev = reader->list.prev;

	/*
	 * cpu_buffer->pages just needs to point to the buffer, it
	 *  has no specific buffer page to point to. Let's move it out
	 *  of our way so we don't accidentally swap it.
	 */
	cpu_buffer->pages = reader->list.prev;

	/* The reader page will be pointing to the new head */
	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);

	/*
	 * We want to make sure we read the overruns after we set up our
	 * pointers to the next object. The writer side does a
	 * cmpxchg to cross pages which acts as the mb on the writer
	 * side. Note, the reader will constantly fail the swap
	 * while the writer is updating the pointers, so this
	 * guarantees that the overwrite recorded here is the one we
	 * want to compare with the last_overrun.
	 */
	smp_mb();
	overwrite = local_read(&(cpu_buffer->overrun));

	/*
	 * Here's the tricky part.
	 *
	 * We need to move the pointer past the header page.
	 * But we can only do that if a writer is not currently
	 * moving it. The page before the header page has the
	 * flag bit '1' set if it is pointing to the page we want.
	 * But if the writer is in the process of moving it
	 * then it will be '2' or already moved '0'.
	 */

	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);

	/*
	 * If we did not convert it, then we must try again.
	 */
	if (!ret)
		goto spin;

	/*
	 * Yeah! We succeeded in replacing the page.
	 *
	 * Now make the new head point back to the reader page.
	 */
	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);

	/* Finally update the reader page to the new head */
	cpu_buffer->reader_page = reader;
	rb_reset_reader_page(cpu_buffer);

	if (overwrite != cpu_buffer->last_overrun) {
		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
		cpu_buffer->last_overrun = overwrite;
	}

	goto again;

 out:
	arch_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

	return reader;
}
static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	unsigned length;

	reader = rb_get_reader_page(cpu_buffer);

	/* This function should not be called when buffer is empty */
	if (RB_WARN_ON(cpu_buffer, !reader))
		return;

	event = rb_reader_event(cpu_buffer);

	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		cpu_buffer->read++;

	rb_update_read_stamp(cpu_buffer, event);

	length = rb_event_length(event);
	cpu_buffer->reader_page->read += length;
}

static void rb_advance_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned length;

	cpu_buffer = iter->cpu_buffer;

	/*
	 * Check if we are at the end of the buffer.
	 */
	if (iter->head >= rb_page_size(iter->head_page)) {
		/* discarded commits can make the page empty */
		if (iter->head_page == cpu_buffer->commit_page)
			return;
		rb_inc_iter(iter);
		return;
	}

	event = rb_iter_head_event(iter);

	length = rb_event_length(event);

	/*
	 * This should not be called to advance the header if we are
	 * at the tail of the buffer.
	 */
	if (RB_WARN_ON(cpu_buffer,
		       (iter->head_page == cpu_buffer->commit_page) &&
		       (iter->head + length > rb_commit_index(cpu_buffer))))
		return;

	rb_update_iter_read_stamp(iter, event);

	iter->head += length;

	/* check for end of page padding */
	if ((iter->head >= rb_page_size(iter->head_page)) &&
	    (iter->head_page != cpu_buffer->commit_page))
		rb_advance_iter(iter);
}

static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
{
	return cpu_buffer->lost_events;
}
static struct ring_buffer_event *
rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
	       unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	int nr_loops = 0;

 again:
	/*
	 * We repeat when a time extend is encountered.
	 * Since the time extend is always attached to a data event,
	 * we should never loop more than once.
	 * (We never hit the following condition more than twice).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
		return NULL;

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		return NULL;

	event = rb_reader_event(cpu_buffer);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			RB_WARN_ON(cpu_buffer, 1);
		/*
		 * Because the writer could be discarding every
		 * event it creates (which would probably be bad)
		 * if we were to go back to "again" then we may never
		 * catch up, and will trigger the warn on, or lock
		 * the box. Return the padding, and we will release
		 * the current locks, and try again.
		 */
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = cpu_buffer->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
							 cpu_buffer->cpu, ts);
		}
		if (lost_events)
			*lost_events = rb_lost_events(cpu_buffer);
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_peek);
static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int nr_loops = 0;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

	/*
	 * Check if someone performed a consuming read to
	 * the buffer. A consuming read invalidates the iterator
	 * and we need to reset the iterator in this case.
	 */
	if (unlikely(iter->cache_read != cpu_buffer->read ||
		     iter->cache_reader_page != cpu_buffer->reader_page))
		rb_iter_reset(iter);

 again:
	if (ring_buffer_iter_empty(iter))
		return NULL;

	/*
	 * We repeat when a time extend is encountered.
	 * Since the time extend is always attached to a data event,
	 * we should never loop more than once.
	 * (We never hit the following condition more than twice).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
		return NULL;

	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	if (iter->head >= local_read(&iter->head_page->page->commit)) {
		rb_inc_iter(iter);
		goto again;
	}

	event = rb_iter_head_event(iter);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event)) {
			rb_inc_iter(iter);
			goto again;
		}
		rb_advance_iter(iter);
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);

static inline int rb_ok_to_lock(void)
{
	/*
	 * If an NMI die dumps out the content of the ring buffer
	 * do not grab locks. We also permanently disable the ring
	 * buffer too. A one time deal is all you get from reading
	 * the ring buffer from an NMI.
	 */
	if (likely(!in_nmi()))
		return 1;

	tracing_off_permanent();
	return 0;
}
/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	unsigned long flags;
	int dolock;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	dolock = rb_ok_to_lock();
 again:
	local_irq_save(flags);
	if (dolock)
		raw_spin_lock(&cpu_buffer->reader_lock);
	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		rb_advance_reader(cpu_buffer);
	if (dolock)
		raw_spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}

/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

 again:
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}
/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the timestamp (may be NULL)
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event = NULL;
	unsigned long flags;
	int dolock;

	dolock = rb_ok_to_lock();

 again:
	/* might be called in atomic */
	preempt_disable();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		raw_spin_lock(&cpu_buffer->reader_lock);

	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
	if (event) {
		cpu_buffer->lost_events = 0;
		rb_advance_reader(cpu_buffer);
	}

	if (dolock)
		raw_spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

 out:
	preempt_enable();

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);
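
/*
 * Example (illustrative sketch, not part of the original file): draining
 * one CPU buffer with consuming reads.  "my_buffer" and "process" are
 * hypothetical names used only for this sketch.
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *	unsigned long lost;
 *
 *	while ((event = ring_buffer_consume(my_buffer, cpu, &ts, &lost)))
 *		process(ring_buffer_event_data(event), ts, lost);
 */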
/**
 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This performs the initial preparations necessary to iterate
 * through the buffer. Memory is allocated, buffer recording
 * is disabled, and the iterator pointer is returned to the caller.
 *
 * Disabling buffer recording prevents the reading from being
 * corrupted. This is not a consuming read, so a producer is not
 * expected.
 *
 * After a sequence of ring_buffer_read_prepare calls, the user is
 * expected to make at least one call to ring_buffer_read_prepare_sync.
 * Afterwards, ring_buffer_read_start is invoked to get things going
 * for real.
 *
 * This overall must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&buffer->resize_disabled);
	atomic_inc(&cpu_buffer->record_disabled);

	return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);

/**
 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
 *
 * All previously invoked ring_buffer_read_prepare calls to prepare
 * iterators will be synchronized. Afterwards, ring_buffer_read_start
 * calls on those iterators are allowed.
 */
void
ring_buffer_read_prepare_sync(void)
{
	synchronize_sched();
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);

/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @iter: The iterator returned by ring_buffer_read_prepare
 *
 * This finalizes the startup of an iteration through the buffer.
 * The iterator comes from a call to ring_buffer_read_prepare and
 * an intervening ring_buffer_read_prepare_sync must have been
 * performed.
 *
 * Must be paired with ring_buffer_read_finish.
 */
void
ring_buffer_read_start(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	arch_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	arch_spin_unlock(&cpu_buffer->lock);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);
/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_prepare
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	/*
	 * Ring buffer is disabled from recording, here's a good place
	 * to check the integrity of the ring buffer.
	 * Must prevent readers from trying to read, as the check
	 * clears the HEAD page and readers require it.
	 */
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	rb_check_pages(cpu_buffer);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->record_disabled);
	atomic_dec(&cpu_buffer->buffer->resize_disabled);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);

/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 again:
	event = rb_iter_peek(iter, ts);
	if (!event)
		goto out;

	if (event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	rb_advance_iter(iter);
 out:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
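
/*
 * Example (illustrative sketch, not part of the original file): the full
 * non-consuming read sequence over one CPU buffer.  "my_buffer" and
 * "process" are hypothetical names used only for this sketch.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(my_buffer, cpu);
 *	if (!iter)
 *		return;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		process(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);
 */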
/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 * @cpu: The CPU to get the ring buffer size from.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
{
	/*
	 * Earlier, this method returned
	 *	BUF_PAGE_SIZE * buffer->nr_pages
	 * Since the nr_pages field is now removed, we have converted this to
	 * return the per cpu buffer value.
	 */
	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);
static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	rb_head_page_deactivate(cpu_buffer);

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->entries, 0);
	local_set(&cpu_buffer->head_page->page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	local_set(&cpu_buffer->entries_bytes, 0);
	local_set(&cpu_buffer->overrun, 0);
	local_set(&cpu_buffer->commit_overrun, 0);
	local_set(&cpu_buffer->dropped_events, 0);
	local_set(&cpu_buffer->entries, 0);
	local_set(&cpu_buffer->committing, 0);
	local_set(&cpu_buffer->commits, 0);
	cpu_buffer->read = 0;
	cpu_buffer->read_bytes = 0;

	cpu_buffer->write_stamp = 0;
	cpu_buffer->read_stamp = 0;

	cpu_buffer->lost_events = 0;
	cpu_buffer->last_overrun = 0;

	rb_head_page_activate(cpu_buffer);
}
/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	atomic_inc(&buffer->resize_disabled);
	atomic_inc(&cpu_buffer->record_disabled);

	/* Make sure all commits have finished */
	synchronize_sched();

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
		goto out;

	arch_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	arch_spin_unlock(&cpu_buffer->lock);

 out:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->record_disabled);
	atomic_dec(&buffer->resize_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);
/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int cpu;
	int ret;

	dolock = rb_ok_to_lock();

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		local_irq_save(flags);
		if (dolock)
			raw_spin_lock(&cpu_buffer->reader_lock);
		ret = rb_per_cpu_empty(cpu_buffer);
		if (dolock)
			raw_spin_unlock(&cpu_buffer->reader_lock);
		local_irq_restore(flags);

		if (!ret)
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 1;

	dolock = rb_ok_to_lock();

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		raw_spin_lock(&cpu_buffer->reader_lock);
	ret = rb_per_cpu_empty(cpu_buffer);
	if (dolock)
		raw_spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU buffer to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;
	int ret = -EINVAL;

	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		goto out;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	/* At least make sure the two buffers are somewhat the same */
	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
		goto out;

	ret = -EAGAIN;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		goto out;

	if (atomic_read(&buffer_a->record_disabled))
		goto out;

	if (atomic_read(&buffer_b->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_a->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_b->record_disabled))
		goto out;

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	ret = -EBUSY;
	if (local_read(&cpu_buffer_a->committing))
		goto out_dec;
	if (local_read(&cpu_buffer_b->committing))
		goto out_dec;

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	ret = 0;

out_dec:
	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 * @cpu: the cpu buffer to allocate from.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or NULL on error.
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
{
	struct buffer_data_page *bpage;
	struct page *page;

	page = alloc_pages_node(cpu_to_node(cpu),
				GFP_KERNEL | __GFP_NORETRY, 0);
	if (!page)
		return NULL;

	bpage = page_address(page);

	rb_init_page(bpage);

	return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);

/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
{
	free_page((unsigned long)data);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (!rpage)
 *		return error;
 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 * When @full is set, the function will not return true unless
 * the writer is off the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  >=0 if data has been transferred, returns the offset of consumed data.
 *  <0 if no data has been transferred.
 */
int ring_buffer_read_page(struct ring_buffer *buffer,
			  void **data_page, size_t len, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	struct buffer_page *reader;
	unsigned long missed_events;
	unsigned long flags;
	unsigned int commit;
	unsigned int read;
	u64 save_timestamp;
	int ret = -1;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	/*
	 * If len is not big enough to hold the page header, then
	 * we can not copy anything.
	 */
	if (len <= BUF_PAGE_HDR_SIZE)
		goto out;

	len -= BUF_PAGE_HDR_SIZE;

	if (!data_page)
		goto out;

	bpage = *data_page;
	if (!bpage)
		goto out;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		goto out_unlock;

	event = rb_reader_event(cpu_buffer);

	read = reader->read;
	commit = rb_page_commit(reader);

	/* Check if any events were dropped */
	missed_events = cpu_buffer->lost_events;

	/*
	 * If this page has been partially read or
	 * if len is not big enough to read the rest of the page or
	 * a writer is still on the page, then
	 * we must copy the data from the page to the buffer.
	 * Otherwise, we can simply swap the page with the one passed in.
	 */
	if (read || (len < (commit - read)) ||
	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
		unsigned int rpos = read;
		unsigned int pos = 0;
		unsigned int size;

		if (full)
			goto out_unlock;

		if (len > (commit - read))
			len = (commit - read);

		/* Always keep the time extend and data together */
		size = rb_event_ts_length(event);

		if (len < size)
			goto out_unlock;

		/* save the current timestamp, since the user will need it */
		save_timestamp = cpu_buffer->read_stamp;

		/* Need to copy one event at a time */
		do {
			/* We need the size of one event, because
			 * rb_advance_reader only advances by one event,
			 * whereas rb_event_ts_length may include the size of
			 * one or two events.
			 * We have already ensured there's enough space if this
			 * is a time extend. */
			size = rb_event_length(event);
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			if (rpos >= commit)
				break;

			event = rb_reader_event(cpu_buffer);
			/* Always keep the time extend and data together */
			size = rb_event_ts_length(event);
		} while (len >= size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* update the entry counter */
		cpu_buffer->read += rb_page_entries(reader);
		cpu_buffer->read_bytes += BUF_PAGE_SIZE;

		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = *data_page;
		local_set(&reader->write, 0);
		local_set(&reader->entries, 0);
		reader->read = 0;
		*data_page = bpage;

		/*
		 * Use the real_end for the data size,
		 * This gives us a chance to store the lost events
		 * on the page.
		 */
		if (reader->real_end)
			local_set(&bpage->commit, reader->real_end);
	}
	ret = read;

	cpu_buffer->lost_events = 0;

	commit = local_read(&bpage->commit);
	/*
	 * Set a flag in the commit field if we lost events
	 */
	if (missed_events) {
		/* If there is room at the end of the page to save the
		 * missed events, then record it there.
		 */
		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
			memcpy(&bpage->data[commit], &missed_events,
			       sizeof(missed_events));
			local_add(RB_MISSED_STORED, &bpage->commit);
			commit += sizeof(missed_events);
		}
		local_add(RB_MISSED_EVENTS, &bpage->commit);
	}

	/*
	 * This page may be off to user land. Zero it out here.
	 */
	if (commit < BUF_PAGE_SIZE)
		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);

 out_unlock:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
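
/*
 * Example (illustrative sketch, not part of the original file): a full
 * page-at-a-time read, pairing the allocation, extraction and free
 * helpers.  "my_buffer" and "process_page" are hypothetical names used
 * only for this sketch.
 *
 *	void *page = ring_buffer_alloc_read_page(my_buffer, cpu);
 *	int ret;
 *
 *	if (!page)
 *		return;
 *	ret = ring_buffer_read_page(my_buffer, &page, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		process_page(page, ret);
 *	ring_buffer_free_read_page(my_buffer, page);
 */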
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	struct ring_buffer *buffer =
		container_of(self, struct ring_buffer, cpu_notify);
	long cpu = (long)hcpu;
	int cpu_i, nr_pages_same;
	unsigned int nr_pages;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (cpumask_test_cpu(cpu, buffer->cpumask))
			return NOTIFY_OK;

		nr_pages = 0;
		nr_pages_same = 1;
		/* check if all cpu sizes are same */
		for_each_buffer_cpu(buffer, cpu_i) {
			/* fill in the size from first enabled cpu */
			if (nr_pages == 0)
				nr_pages = buffer->buffers[cpu_i]->nr_pages;
			if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
				nr_pages_same = 0;
				break;
			}
		}
		/* allocate minimum pages, user can later expand it */
		if (!nr_pages_same)
			nr_pages = 2;
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
		if (!buffer->buffers[cpu]) {
			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
			     cpu);
			return NOTIFY_OK;
		}
		smp_wmb();
		cpumask_set_cpu(cpu, buffer->cpumask);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Do nothing.
		 *  If we were to free the buffer, then the user would
		 *  lose any trace that was in the buffer.