1 /*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6 #include <linux/ring_buffer.h>
7 #include <linux/trace_clock.h>
8 #include <linux/spinlock.h>
9 #include <linux/debugfs.h>
10 #include <linux/uaccess.h>
11 #include <linux/hardirq.h>
12 #include <linux/kmemcheck.h>
13 #include <linux/module.h>
14 #include <linux/percpu.h>
15 #include <linux/mutex.h>
16 #include <linux/slab.h>
17 #include <linux/init.h>
18 #include <linux/hash.h>
19 #include <linux/list.h>
20 #include <linux/cpu.h>
21 #include <linux/fs.h>
22
23 #include <asm/local.h>
24 #include "trace.h"
25
26 static void update_pages_handler(struct work_struct *work);
27
28 /*
29 * The ring buffer header is special. We must manually keep it up to date.
30 */
31 int ring_buffer_print_entry_header(struct trace_seq *s)
32 {
33 int ret;
34
35 ret = trace_seq_printf(s, "# compressed entry header\n");
36 ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
37 ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
38 ret = trace_seq_printf(s, "\tarray : 32 bits\n");
39 ret = trace_seq_printf(s, "\n");
40 ret = trace_seq_printf(s, "\tpadding : type == %d\n",
41 RINGBUF_TYPE_PADDING);
42 ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
43 RINGBUF_TYPE_TIME_EXTEND);
44 ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
45 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
46
47 return ret;
48 }
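/*
 * Editorial sketch (not part of the original file): the compressed header
 * printed above corresponds to a layout along the lines of the struct
 * below. The authoritative definition is struct ring_buffer_event in
 * <linux/ring_buffer.h>; the _example name here is hypothetical and the
 * struct is shown only to illustrate the 5/27/32 bit split.
 */
struct ring_buffer_event_example {
	u32	type_len:5, time_delta:27;	/* one 32 bit word */
	u32	array[];			/* length word and/or payload */
};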
49
50 /*
51 * The ring buffer is made up of a list of pages. A separate list of pages is
52 * allocated for each CPU. A writer may only write to a buffer that is
53 * associated with the CPU it is currently executing on. A reader may read
54 * from any per cpu buffer.
55 *
56 * The reader is special. For each per cpu buffer, the reader has its own
57 * reader page. When a reader has read the entire reader page, this reader
58 * page is swapped with another page in the ring buffer.
59 *
60 * Now, as long as the writer is off the reader page, the reader can do
61 * whatever it wants with that page. The writer will never write to that page
62 * again (as long as it is out of the ring buffer).
63 *
64 * Here's some silly ASCII art.
65 *
66 * +------+
67 * |reader| RING BUFFER
68 * |page |
69 * +------+ +---+ +---+ +---+
70 * | |-->| |-->| |
71 * +---+ +---+ +---+
72 * ^ |
73 * | |
74 * +---------------+
75 *
76 *
77 * +------+
78 * |reader| RING BUFFER
79 * |page |------------------v
80 * +------+ +---+ +---+ +---+
81 * | |-->| |-->| |
82 * +---+ +---+ +---+
83 * ^ |
84 * | |
85 * +---------------+
86 *
87 *
88 * +------+
89 * |reader| RING BUFFER
90 * |page |------------------v
91 * +------+ +---+ +---+ +---+
92 * ^ | |-->| |-->| |
93 * | +---+ +---+ +---+
94 * | |
95 * | |
96 * +------------------------------+
97 *
98 *
99 * +------+
100 * |buffer| RING BUFFER
101 * |page |------------------v
102 * +------+ +---+ +---+ +---+
103 * ^ | | | |-->| |
104 * | New +---+ +---+ +---+
105 * | Reader------^ |
106 * | page |
107 * +------------------------------+
108 *
109 *
110 * After we make this swap, the reader can hand this page off to the splice
111 * code and be done with it. It can even allocate a new page if it needs to
112 * and swap that into the ring buffer.
113 *
114 * We will be using cmpxchg soon to make all this lockless.
115 *
116 */
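/*
 * Editorial sketch (not part of the original file): a minimal model of the
 * reader page swap described above, ignoring the HEAD flag bits and the
 * cmpxchg that the real code below uses to stay lockless.  The _example
 * names are hypothetical.
 */
static inline void rb_swap_reader_example(struct list_head **reader_page,
					  struct list_head *head_page)
{
	struct list_head *spare = *reader_page;

	/* splice the spare (old reader) page into the ring where head was */
	spare->next = head_page->next;
	spare->prev = head_page->prev;
	head_page->prev->next = spare;
	head_page->next->prev = spare;

	/* the former head page leaves the ring and becomes the reader page */
	INIT_LIST_HEAD(head_page);
	*reader_page = head_page;
}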
117
118 /*
119 * A fast way to enable or disable all ring buffers is to
120 * call tracing_on or tracing_off. Turning off the ring buffers
121 * prevents all ring buffers from being recorded to.
122 * Turning this switch on makes it OK to write to the
123 * ring buffer, if the ring buffer itself is enabled.
124 *
125 * There are three layers that must be on in order to write
126 * to the ring buffer.
127 *
128 * 1) This global flag must be set.
129 * 2) The ring buffer must be enabled for recording.
130 * 3) The per cpu buffer must be enabled for recording.
131 *
132 * In case of an anomaly, this global flag has a bit set that
133 * will permanently disable all ring buffers.
134 */
135
136 /*
137 * Global flag to disable all recording to ring buffers
138 * This has two bits: ON, DISABLED
139 *
140 * ON DISABLED
141 * ---- ----------
142 * 0 0 : ring buffers are off
143 * 1 0 : ring buffers are on
144 * X 1 : ring buffers are permanently disabled
145 */
146
147 enum {
148 RB_BUFFERS_ON_BIT = 0,
149 RB_BUFFERS_DISABLED_BIT = 1,
150 };
151
152 enum {
153 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
154 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
155 };
156
157 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
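/*
 * Editorial sketch (not part of the original file): how the two bits above
 * combine, per the table in the comment.  rb_global_recording_ok_example()
 * is hypothetical; the real check lives in the reserve/write paths further
 * down the file.
 */
static inline int rb_global_recording_ok_example(void)
{
	/* DISABLED is permanent and wins over ON */
	return (ring_buffer_flags & RB_BUFFERS_ON) &&
	       !(ring_buffer_flags & RB_BUFFERS_DISABLED);
}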
158
159 /* Used for individual buffers (after the counter) */
160 #define RB_BUFFER_OFF (1 << 20)
161
162 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
163
164 /**
165 * tracing_off_permanent - permanently disable ring buffers
166 *
167 * This function, once called, will disable all ring buffers
168 * permanently.
169 */
170 void tracing_off_permanent(void)
171 {
172 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
173 }
174
175 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
176 #define RB_ALIGNMENT 4U
177 #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
178 #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
179
180 #ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
181 # define RB_FORCE_8BYTE_ALIGNMENT 0
182 # define RB_ARCH_ALIGNMENT RB_ALIGNMENT
183 #else
184 # define RB_FORCE_8BYTE_ALIGNMENT 1
185 # define RB_ARCH_ALIGNMENT 8U
186 #endif
187
188 #define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT)
189
190 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
191 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
192
193 enum {
194 RB_LEN_TIME_EXTEND = 8,
195 RB_LEN_TIME_STAMP = 16,
196 };
197
198 #define skip_time_extend(event) \
199 ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
200
201 static inline int rb_null_event(struct ring_buffer_event *event)
202 {
203 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
204 }
205
206 static void rb_event_set_padding(struct ring_buffer_event *event)
207 {
208 /* padding has a NULL time_delta */
209 event->type_len = RINGBUF_TYPE_PADDING;
210 event->time_delta = 0;
211 }
212
213 static unsigned
214 rb_event_data_length(struct ring_buffer_event *event)
215 {
216 unsigned length;
217
218 if (event->type_len)
219 length = event->type_len * RB_ALIGNMENT;
220 else
221 length = event->array[0];
222 return length + RB_EVNT_HDR_SIZE;
223 }
224
225 /*
226 * Return the length of the given event. Will return
227 * the length of the time extend if the event is a
228 * time extend.
229 */
230 static inline unsigned
231 rb_event_length(struct ring_buffer_event *event)
232 {
233 switch (event->type_len) {
234 case RINGBUF_TYPE_PADDING:
235 if (rb_null_event(event))
236 /* undefined */
237 return -1;
238 return event->array[0] + RB_EVNT_HDR_SIZE;
239
240 case RINGBUF_TYPE_TIME_EXTEND:
241 return RB_LEN_TIME_EXTEND;
242
243 case RINGBUF_TYPE_TIME_STAMP:
244 return RB_LEN_TIME_STAMP;
245
246 case RINGBUF_TYPE_DATA:
247 return rb_event_data_length(event);
248 default:
249 BUG();
250 }
251 /* not hit */
252 return 0;
253 }
254
255 /*
256 * Return total length of time extend and data,
257 * or just the event length for all other events.
258 */
259 static inline unsigned
260 rb_event_ts_length(struct ring_buffer_event *event)
261 {
262 unsigned len = 0;
263
264 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
265 /* time extends include the data event after it */
266 len = RB_LEN_TIME_EXTEND;
267 event = skip_time_extend(event);
268 }
269 return len + rb_event_length(event);
270 }
271
272 /**
273 * ring_buffer_event_length - return the length of the event
274 * @event: the event to get the length of
275 *
276 * Returns the size of the data load of a data event.
277 * If the event is something other than a data event, it
278 * returns the size of the event itself. With the exception
279 * of a TIME EXTEND, where it still returns the size of the
280 * data load of the data event after it.
281 */
282 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
283 {
284 unsigned length;
285
286 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
287 event = skip_time_extend(event);
288
289 length = rb_event_length(event);
290 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
291 return length;
292 length -= RB_EVNT_HDR_SIZE;
293 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
294 length -= sizeof(event->array[0]);
295 return length;
296 }
297 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
298
299 /* inline for ring buffer fast paths */
300 static void *
301 rb_event_data(struct ring_buffer_event *event)
302 {
303 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
304 event = skip_time_extend(event);
305 BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
306 /* If length is in len field, then array[0] has the data */
307 if (event->type_len)
308 return (void *)&event->array[0];
309 /* Otherwise length is in array[0] and array[1] has the data */
310 return (void *)&event->array[1];
311 }
312
313 /**
314 * ring_buffer_event_data - return the data of the event
315 * @event: the event to get the data from
316 */
317 void *ring_buffer_event_data(struct ring_buffer_event *event)
318 {
319 return rb_event_data(event);
320 }
321 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
322
323 #define for_each_buffer_cpu(buffer, cpu) \
324 for_each_cpu(cpu, buffer->cpumask)
325
326 #define TS_SHIFT 27
327 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
328 #define TS_DELTA_TEST (~TS_MASK)
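/*
 * Editorial note (not part of the original file): with a nanosecond clock
 * (trace_clock_local is the default, see __ring_buffer_alloc()), the 27 bit
 * delta covers roughly 134 ms (2^27 ns).  Larger gaps between events force
 * a RINGBUF_TYPE_TIME_EXTEND event via rb_add_time_stamp().
 */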
329
330 /* Flag when events were overwritten */
331 #define RB_MISSED_EVENTS (1 << 31)
332 /* Missed count stored at end */
333 #define RB_MISSED_STORED (1 << 30)
334
335 struct buffer_data_page {
336 u64 time_stamp; /* page time stamp */
337 local_t commit; /* write committed index */
338 unsigned char data[] RB_ALIGN_DATA; /* data of buffer page */
339 };
340
341 /*
342 * Note, the buffer_page list must be first. The buffer pages
343 * are allocated in cache lines, which means that each buffer
344 * page will be at the beginning of a cache line, and thus
345 * the least significant bits will be zero. We use this to
346 * add flags in the list struct pointers, to make the ring buffer
347 * lockless.
348 */
349 struct buffer_page {
350 struct list_head list; /* list of buffer pages */
351 local_t write; /* index for next write */
352 unsigned read; /* index for next read */
353 local_t entries; /* entries on this page */
354 unsigned long real_end; /* real end of data */
355 struct buffer_data_page *page; /* Actual data page */
356 };
357
358 /*
359 * The buffer page counters, write and entries, must be reset
360 * atomically when crossing page boundaries. To synchronize this
361 * update, two counters are inserted into the number. One is
362 * the actual counter for the write position or count on the page.
363 *
364 * The other is a counter of updaters. Before an update happens
365 * the update partition of the counter is incremented. This will
366 * allow the updater to update the counter atomically.
367 *
368 * The counter is 20 bits, and the state data is 12.
369 */
370 #define RB_WRITE_MASK 0xfffff
371 #define RB_WRITE_INTCNT (1 << 20)
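/*
 * Editorial sketch (not part of the original file): decoding the combined
 * value described above.  The low 20 bits hold the write index, the bits
 * above them count nested updaters.  The _example helpers are hypothetical;
 * the real code masks with RB_WRITE_MASK directly (see rb_page_write()).
 */
static inline unsigned long rb_write_index_example(unsigned long w)
{
	return w & RB_WRITE_MASK;
}

static inline unsigned long rb_write_updaters_example(unsigned long w)
{
	return w / RB_WRITE_INTCNT;
}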
372
373 static void rb_init_page(struct buffer_data_page *bpage)
374 {
375 local_set(&bpage->commit, 0);
376 }
377
378 /**
379 * ring_buffer_page_len - the size of data on the page.
380 * @page: The page to read
381 *
382 * Returns the amount of data on the page, including buffer page header.
383 */
384 size_t ring_buffer_page_len(void *page)
385 {
386 return local_read(&((struct buffer_data_page *)page)->commit)
387 + BUF_PAGE_HDR_SIZE;
388 }
389
390 /*
391 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
392 * this issue out.
393 */
394 static void free_buffer_page(struct buffer_page *bpage)
395 {
396 free_page((unsigned long)bpage->page);
397 kfree(bpage);
398 }
399
400 /*
401 * We need to fit the time_stamp delta into 27 bits.
402 */
403 static inline int test_time_stamp(u64 delta)
404 {
405 if (delta & TS_DELTA_TEST)
406 return 1;
407 return 0;
408 }
409
410 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
411
412 /* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
413 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
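/*
 * Editorial worked example (not part of the original file): on a 64 bit
 * arch with 4K pages, the buffer_data_page header (u64 time_stamp plus
 * local_t commit) is 16 bytes, so BUF_PAGE_SIZE is 4080 bytes and
 * BUF_MAX_DATA_SIZE is 4072 bytes.
 */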
414
415 int ring_buffer_print_page_header(struct trace_seq *s)
416 {
417 struct buffer_data_page field;
418 int ret;
419
420 ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
421 "offset:0;\tsize:%u;\tsigned:%u;\n",
422 (unsigned int)sizeof(field.time_stamp),
423 (unsigned int)is_signed_type(u64));
424
425 ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
426 "offset:%u;\tsize:%u;\tsigned:%u;\n",
427 (unsigned int)offsetof(typeof(field), commit),
428 (unsigned int)sizeof(field.commit),
429 (unsigned int)is_signed_type(long));
430
431 ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
432 "offset:%u;\tsize:%u;\tsigned:%u;\n",
433 (unsigned int)offsetof(typeof(field), commit),
434 1,
435 (unsigned int)is_signed_type(long));
436
437 ret = trace_seq_printf(s, "\tfield: char data;\t"
438 "offset:%u;\tsize:%u;\tsigned:%u;\n",
439 (unsigned int)offsetof(typeof(field), data),
440 (unsigned int)BUF_PAGE_SIZE,
441 (unsigned int)is_signed_type(char));
442
443 return ret;
444 }
445
446 /*
447 * head_page == tail_page && head == tail then buffer is empty.
448 */
449 struct ring_buffer_per_cpu {
450 int cpu;
451 atomic_t record_disabled;
452 struct ring_buffer *buffer;
453 raw_spinlock_t reader_lock; /* serialize readers */
454 arch_spinlock_t lock;
455 struct lock_class_key lock_key;
456 unsigned int nr_pages;
457 struct list_head *pages;
458 struct buffer_page *head_page; /* read from head */
459 struct buffer_page *tail_page; /* write to tail */
460 struct buffer_page *commit_page; /* committed pages */
461 struct buffer_page *reader_page;
462 unsigned long lost_events;
463 unsigned long last_overrun;
464 local_t entries_bytes;
465 local_t entries;
466 local_t overrun;
467 local_t commit_overrun;
468 local_t dropped_events;
469 local_t committing;
470 local_t commits;
471 unsigned long read;
472 unsigned long read_bytes;
473 u64 write_stamp;
474 u64 read_stamp;
475 /* ring buffer pages to update, > 0 to add, < 0 to remove */
476 int nr_pages_to_update;
477 struct list_head new_pages; /* new pages to add */
478 struct work_struct update_pages_work;
479 struct completion update_done;
480 };
481
482 struct ring_buffer {
483 unsigned flags;
484 int cpus;
485 atomic_t record_disabled;
486 atomic_t resize_disabled;
487 cpumask_var_t cpumask;
488
489 struct lock_class_key *reader_lock_key;
490
491 struct mutex mutex;
492
493 struct ring_buffer_per_cpu **buffers;
494
495 #ifdef CONFIG_HOTPLUG_CPU
496 struct notifier_block cpu_notify;
497 #endif
498 u64 (*clock)(void);
499 };
500
501 struct ring_buffer_iter {
502 struct ring_buffer_per_cpu *cpu_buffer;
503 unsigned long head;
504 struct buffer_page *head_page;
505 struct buffer_page *cache_reader_page;
506 unsigned long cache_read;
507 u64 read_stamp;
508 };
509
510 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
511 #define RB_WARN_ON(b, cond) \
512 ({ \
513 int _____ret = unlikely(cond); \
514 if (_____ret) { \
515 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
516 struct ring_buffer_per_cpu *__b = \
517 (void *)b; \
518 atomic_inc(&__b->buffer->record_disabled); \
519 } else \
520 atomic_inc(&b->record_disabled); \
521 WARN_ON(1); \
522 } \
523 _____ret; \
524 })
525
526 /* Up this if you want to test the TIME_EXTENTS and normalization */
527 #define DEBUG_SHIFT 0
528
529 static inline u64 rb_time_stamp(struct ring_buffer *buffer)
530 {
531 /* shift to debug/test normalization and TIME_EXTENTS */
532 return buffer->clock() << DEBUG_SHIFT;
533 }
534
535 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
536 {
537 u64 time;
538
539 preempt_disable_notrace();
540 time = rb_time_stamp(buffer);
541 preempt_enable_no_resched_notrace();
542
543 return time;
544 }
545 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
546
547 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
548 int cpu, u64 *ts)
549 {
550 /* Just stupid testing the normalize function and deltas */
551 *ts >>= DEBUG_SHIFT;
552 }
553 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
554
555 /*
556 * Making the ring buffer lockless makes things tricky.
557 * Writes only happen on the CPU they are on, so they only
558 * need to worry about interrupts. Reads, however, can
559 * happen on any CPU.
560 *
561 * The reader page is always off the ring buffer, but when the
562 * reader finishes with a page, it needs to swap its page with
563 * a new one from the buffer. The reader needs to take from
564 * the head (writes go to the tail). But if a writer is in overwrite
565 * mode and wraps, it must push the head page forward.
566 *
567 * Here lies the problem.
568 *
569 * The reader must be careful to replace only the head page, and
570 * not another one. As described at the top of the file in the
571 * ASCII art, the reader sets its old page to point to the next
572 * page after head. It then sets the page after head to point to
573 * the old reader page. But if the writer moves the head page
574 * during this operation, the reader could end up with the tail.
575 *
576 * We use cmpxchg to help prevent this race. We also do something
577 * special with the page before head. We set the LSB to 1.
578 *
579 * When the writer must push the page forward, it will clear the
580 * bit that points to the head page, move the head, and then set
581 * the bit that points to the new head page.
582 *
583 * We also don't want an interrupt coming in and moving the head
584 * page out from under another writer, so we use the second LSB
585 * to catch that too. Thus:
586 *
587 * head->list->prev->next bit 1 bit 0
588 * ------- -------
589 * Normal page 0 0
590 * Points to head page 0 1
591 * New head page 1 0
592 *
593 * Note we can not trust the prev pointer of the head page, because:
594 *
595 * +----+ +-----+ +-----+
596 * | |------>| T |---X--->| N |
597 * | |<------| | | |
598 * +----+ +-----+ +-----+
599 * ^ ^ |
600 * | +-----+ | |
601 * +----------| R |----------+ |
602 * | |<-----------+
603 * +-----+
604 *
605 * Key: ---X--> HEAD flag set in pointer
606 * T Tail page
607 * R Reader page
608 * N Next page
609 *
610 * (see __rb_reserve_next() to see where this happens)
611 *
612 * What the above shows is that the reader just swapped out
613 * the reader page with a page in the buffer, but before it
614 * could make the new header point back to the new page added
615 * it was preempted by a writer. The writer moved forward onto
616 * the new page added by the reader and is about to move forward
617 * again.
618 *
619 * As you can see, it is legitimate for the previous pointer of
620 * the head (or any page) not to point back to itself. But only
621 * temporarily.
622 */
623
624 #define RB_PAGE_NORMAL 0UL
625 #define RB_PAGE_HEAD 1UL
626 #define RB_PAGE_UPDATE 2UL
627
628
629 #define RB_FLAG_MASK 3UL
630
631 /* PAGE_MOVED is not part of the mask */
632 #define RB_PAGE_MOVED 4UL
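/*
 * Editorial sketch (not part of the original file): how a state flag rides
 * in the low bits of a list pointer.  Buffer pages are cache-line aligned,
 * so these bits are otherwise always zero.  The _example helpers are
 * hypothetical; rb_list_head() and rb_is_head_page() below do the real work.
 */
static inline struct list_head *rb_tag_pointer_example(struct list_head *p,
							unsigned long flag)
{
	return (struct list_head *)((unsigned long)p | flag);
}

static inline unsigned long rb_pointer_flags_example(struct list_head *p)
{
	return (unsigned long)p & RB_FLAG_MASK;
}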
633
634 /*
635 * rb_list_head - remove any bit
636 */
637 static struct list_head *rb_list_head(struct list_head *list)
638 {
639 unsigned long val = (unsigned long)list;
640
641 return (struct list_head *)(val & ~RB_FLAG_MASK);
642 }
643
644 /*
645 * rb_is_head_page - test if the given page is the head page
646 *
647 * Because the reader may move the head_page pointer, we can
648 * not trust what the head page is (it may be pointing to
649 * the reader page). But if the next page is a header page,
650 * its flags will be non zero.
651 */
652 static inline int
653 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
654 struct buffer_page *page, struct list_head *list)
655 {
656 unsigned long val;
657
658 val = (unsigned long)list->next;
659
660 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
661 return RB_PAGE_MOVED;
662
663 return val & RB_FLAG_MASK;
664 }
665
666 /*
667 * rb_is_reader_page
668 *
669 * The unique thing about the reader page is that, if the
670 * writer is ever on it, the previous pointer never points
671 * back to the reader page.
672 */
673 static int rb_is_reader_page(struct buffer_page *page)
674 {
675 struct list_head *list = page->list.prev;
676
677 return rb_list_head(list->next) != &page->list;
678 }
679
680 /*
681 * rb_set_list_to_head - set a list_head to be pointing to head.
682 */
683 static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
684 struct list_head *list)
685 {
686 unsigned long *ptr;
687
688 ptr = (unsigned long *)&list->next;
689 *ptr |= RB_PAGE_HEAD;
690 *ptr &= ~RB_PAGE_UPDATE;
691 }
692
693 /*
694 * rb_head_page_activate - sets up head page
695 */
696 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
697 {
698 struct buffer_page *head;
699
700 head = cpu_buffer->head_page;
701 if (!head)
702 return;
703
704 /*
705 * Set the previous list pointer to have the HEAD flag.
706 */
707 rb_set_list_to_head(cpu_buffer, head->list.prev);
708 }
709
710 static void rb_list_head_clear(struct list_head *list)
711 {
712 unsigned long *ptr = (unsigned long *)&list->next;
713
714 *ptr &= ~RB_FLAG_MASK;
715 }
716
717 /*
718 * rb_head_page_deactivate - clears head page ptr (for free list)
719 */
720 static void
721 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
722 {
723 struct list_head *hd;
724
725 /* Go through the whole list and clear any pointers found. */
726 rb_list_head_clear(cpu_buffer->pages);
727
728 list_for_each(hd, cpu_buffer->pages)
729 rb_list_head_clear(hd);
730 }
731
732 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
733 struct buffer_page *head,
734 struct buffer_page *prev,
735 int old_flag, int new_flag)
736 {
737 struct list_head *list;
738 unsigned long val = (unsigned long)&head->list;
739 unsigned long ret;
740
741 list = &prev->list;
742
743 val &= ~RB_FLAG_MASK;
744
745 ret = cmpxchg((unsigned long *)&list->next,
746 val | old_flag, val | new_flag);
747
748 /* check if the reader took the page */
749 if ((ret & ~RB_FLAG_MASK) != val)
750 return RB_PAGE_MOVED;
751
752 return ret & RB_FLAG_MASK;
753 }
754
755 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
756 struct buffer_page *head,
757 struct buffer_page *prev,
758 int old_flag)
759 {
760 return rb_head_page_set(cpu_buffer, head, prev,
761 old_flag, RB_PAGE_UPDATE);
762 }
763
764 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
765 struct buffer_page *head,
766 struct buffer_page *prev,
767 int old_flag)
768 {
769 return rb_head_page_set(cpu_buffer, head, prev,
770 old_flag, RB_PAGE_HEAD);
771 }
772
773 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
774 struct buffer_page *head,
775 struct buffer_page *prev,
776 int old_flag)
777 {
778 return rb_head_page_set(cpu_buffer, head, prev,
779 old_flag, RB_PAGE_NORMAL);
780 }
781
782 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
783 struct buffer_page **bpage)
784 {
785 struct list_head *p = rb_list_head((*bpage)->list.next);
786
787 *bpage = list_entry(p, struct buffer_page, list);
788 }
789
790 static struct buffer_page *
791 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
792 {
793 struct buffer_page *head;
794 struct buffer_page *page;
795 struct list_head *list;
796 int i;
797
798 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
799 return NULL;
800
801 /* sanity check */
802 list = cpu_buffer->pages;
803 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
804 return NULL;
805
806 page = head = cpu_buffer->head_page;
807 /*
808 * It is possible that the writer moves the head page to behind
809 * where we started, and we miss it in one loop pass.
810 * A second loop should grab the head page, but we'll do
811 * three loops just because I'm paranoid.
812 */
813 for (i = 0; i < 3; i++) {
814 do {
815 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
816 cpu_buffer->head_page = page;
817 return page;
818 }
819 rb_inc_page(cpu_buffer, &page);
820 } while (page != head);
821 }
822
823 RB_WARN_ON(cpu_buffer, 1);
824
825 return NULL;
826 }
827
828 static int rb_head_page_replace(struct buffer_page *old,
829 struct buffer_page *new)
830 {
831 unsigned long *ptr = (unsigned long *)&old->list.prev->next;
832 unsigned long val;
833 unsigned long ret;
834
835 val = *ptr & ~RB_FLAG_MASK;
836 val |= RB_PAGE_HEAD;
837
838 ret = cmpxchg(ptr, val, (unsigned long)&new->list);
839
840 return ret == val;
841 }
842
843 /*
844 * rb_tail_page_update - move the tail page forward
845 *
846 * Returns 1 if we moved the tail page, 0 if someone else did.
847 */
848 static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
849 struct buffer_page *tail_page,
850 struct buffer_page *next_page)
851 {
852 struct buffer_page *old_tail;
853 unsigned long old_entries;
854 unsigned long old_write;
855 int ret = 0;
856
857 /*
858 * The tail page now needs to be moved forward.
859 *
860 * We need to reset the tail page, but without messing
861 * with possible erasing of data brought in by interrupts
862 * that have moved the tail page and are currently on it.
863 *
864 * We add a counter to the write field to denote this.
865 */
866 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
867 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
868
869 /*
870 * Just make sure we have seen our old_write and synchronize
871 * with any interrupts that come in.
872 */
873 barrier();
874
875 /*
876 * If the tail page is still the same as what we think
877 * it is, then it is up to us to update the tail
878 * pointer.
879 */
880 if (tail_page == cpu_buffer->tail_page) {
881 /* Zero the write counter */
882 unsigned long val = old_write & ~RB_WRITE_MASK;
883 unsigned long eval = old_entries & ~RB_WRITE_MASK;
884
885 /*
886 * This will only succeed if an interrupt did
887 * not come in and change it. In which case, we
888 * do not want to modify it.
889 *
890 * We add (void) to let the compiler know that we do not care
891 * about the return value of these functions. We use the
892 * cmpxchg to only update if an interrupt did not already
893 * do it for us. If the cmpxchg fails, we don't care.
894 */
895 (void)local_cmpxchg(&next_page->write, old_write, val);
896 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
897
898 /*
899 * No need to worry about races with clearing out the commit;
900 * it can only increment when a commit takes place. But that
901 * only happens in the outermost nested commit.
902 */
903 local_set(&next_page->page->commit, 0);
904
905 old_tail = cmpxchg(&cpu_buffer->tail_page,
906 tail_page, next_page);
907
908 if (old_tail == tail_page)
909 ret = 1;
910 }
911
912 return ret;
913 }
914
915 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
916 struct buffer_page *bpage)
917 {
918 unsigned long val = (unsigned long)bpage;
919
920 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
921 return 1;
922
923 return 0;
924 }
925
926 /**
927 * rb_check_list - make sure a pointer to a list has the last bits zero
928 */
929 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
930 struct list_head *list)
931 {
932 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
933 return 1;
934 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
935 return 1;
936 return 0;
937 }
938
939 /**
940 * rb_check_pages - integrity check of buffer pages
941 * @cpu_buffer: CPU buffer with pages to test
942 *
943 * As a safety measure we check to make sure the data pages have not
944 * been corrupted.
945 */
946 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
947 {
948 struct list_head *head = cpu_buffer->pages;
949 struct buffer_page *bpage, *tmp;
950
951 /* Reset the head page if it exists */
952 if (cpu_buffer->head_page)
953 rb_set_head_page(cpu_buffer);
954
955 rb_head_page_deactivate(cpu_buffer);
956
957 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
958 return -1;
959 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
960 return -1;
961
962 if (rb_check_list(cpu_buffer, head))
963 return -1;
964
965 list_for_each_entry_safe(bpage, tmp, head, list) {
966 if (RB_WARN_ON(cpu_buffer,
967 bpage->list.next->prev != &bpage->list))
968 return -1;
969 if (RB_WARN_ON(cpu_buffer,
970 bpage->list.prev->next != &bpage->list))
971 return -1;
972 if (rb_check_list(cpu_buffer, &bpage->list))
973 return -1;
974 }
975
976 rb_head_page_activate(cpu_buffer);
977
978 return 0;
979 }
980
981 static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
982 {
983 int i;
984 struct buffer_page *bpage, *tmp;
985
986 for (i = 0; i < nr_pages; i++) {
987 struct page *page;
988 /*
989 * __GFP_NORETRY flag makes sure that the allocation fails
990 * gracefully without invoking oom-killer and the system is
991 * not destabilized.
992 */
993 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
994 GFP_KERNEL | __GFP_NORETRY,
995 cpu_to_node(cpu));
996 if (!bpage)
997 goto free_pages;
998
999 list_add(&bpage->list, pages);
1000
1001 page = alloc_pages_node(cpu_to_node(cpu),
1002 GFP_KERNEL | __GFP_NORETRY, 0);
1003 if (!page)
1004 goto free_pages;
1005 bpage->page = page_address(page);
1006 rb_init_page(bpage->page);
1007 }
1008
1009 return 0;
1010
1011 free_pages:
1012 list_for_each_entry_safe(bpage, tmp, pages, list) {
1013 list_del_init(&bpage->list);
1014 free_buffer_page(bpage);
1015 }
1016
1017 return -ENOMEM;
1018 }
1019
1020 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1021 unsigned nr_pages)
1022 {
1023 LIST_HEAD(pages);
1024
1025 WARN_ON(!nr_pages);
1026
1027 if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
1028 return -ENOMEM;
1029
1030 /*
1031 * The ring buffer page list is a circular list that does not
1032 * start and end with a list head. All page list items point to
1033 * other pages.
1034 */
1035 cpu_buffer->pages = pages.next;
1036 list_del(&pages);
1037
1038 cpu_buffer->nr_pages = nr_pages;
1039
1040 rb_check_pages(cpu_buffer);
1041
1042 return 0;
1043 }
1044
1045 static struct ring_buffer_per_cpu *
1046 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
1047 {
1048 struct ring_buffer_per_cpu *cpu_buffer;
1049 struct buffer_page *bpage;
1050 struct page *page;
1051 int ret;
1052
1053 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1054 GFP_KERNEL, cpu_to_node(cpu));
1055 if (!cpu_buffer)
1056 return NULL;
1057
1058 cpu_buffer->cpu = cpu;
1059 cpu_buffer->buffer = buffer;
1060 raw_spin_lock_init(&cpu_buffer->reader_lock);
1061 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1062 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1063 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1064 init_completion(&cpu_buffer->update_done);
1065
1066 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1067 GFP_KERNEL, cpu_to_node(cpu));
1068 if (!bpage)
1069 goto fail_free_buffer;
1070
1071 rb_check_bpage(cpu_buffer, bpage);
1072
1073 cpu_buffer->reader_page = bpage;
1074 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1075 if (!page)
1076 goto fail_free_reader;
1077 bpage->page = page_address(page);
1078 rb_init_page(bpage->page);
1079
1080 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1081 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1082
1083 ret = rb_allocate_pages(cpu_buffer, nr_pages);
1084 if (ret < 0)
1085 goto fail_free_reader;
1086
1087 cpu_buffer->head_page
1088 = list_entry(cpu_buffer->pages, struct buffer_page, list);
1089 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1090
1091 rb_head_page_activate(cpu_buffer);
1092
1093 return cpu_buffer;
1094
1095 fail_free_reader:
1096 free_buffer_page(cpu_buffer->reader_page);
1097
1098 fail_free_buffer:
1099 kfree(cpu_buffer);
1100 return NULL;
1101 }
1102
1103 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1104 {
1105 struct list_head *head = cpu_buffer->pages;
1106 struct buffer_page *bpage, *tmp;
1107
1108 free_buffer_page(cpu_buffer->reader_page);
1109
1110 rb_head_page_deactivate(cpu_buffer);
1111
1112 if (head) {
1113 list_for_each_entry_safe(bpage, tmp, head, list) {
1114 list_del_init(&bpage->list);
1115 free_buffer_page(bpage);
1116 }
1117 bpage = list_entry(head, struct buffer_page, list);
1118 free_buffer_page(bpage);
1119 }
1120
1121 kfree(cpu_buffer);
1122 }
1123
1124 #ifdef CONFIG_HOTPLUG_CPU
1125 static int rb_cpu_notify(struct notifier_block *self,
1126 unsigned long action, void *hcpu);
1127 #endif
1128
1129 /**
1130 * ring_buffer_alloc - allocate a new ring_buffer
1131 * @size: the size in bytes per cpu that is needed.
1132 * @flags: attributes to set for the ring buffer.
1133 *
1134 * Currently the only flag that is available is the RB_FL_OVERWRITE
1135 * flag. This flag means that the buffer will overwrite old data
1136 * when the buffer wraps. If this flag is not set, the buffer will
1137 * drop data when the tail hits the head.
1138 */
1139 struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1140 struct lock_class_key *key)
1141 {
1142 struct ring_buffer *buffer;
1143 int bsize;
1144 int cpu, nr_pages;
1145
1146 /* keep it in its own cache line */
1147 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1148 GFP_KERNEL);
1149 if (!buffer)
1150 return NULL;
1151
1152 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1153 goto fail_free_buffer;
1154
1155 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1156 buffer->flags = flags;
1157 buffer->clock = trace_clock_local;
1158 buffer->reader_lock_key = key;
1159
1160 /* need at least two pages */
1161 if (nr_pages < 2)
1162 nr_pages = 2;
1163
1164 /*
1165 * In the non-hotplug-cpu case, if the ring buffer is allocated
1166 * in an early initcall, it will not be notified of secondary cpus.
1167 * In that case, we need to allocate for all possible cpus.
1168 */
1169 #ifdef CONFIG_HOTPLUG_CPU
1170 get_online_cpus();
1171 cpumask_copy(buffer->cpumask, cpu_online_mask);
1172 #else
1173 cpumask_copy(buffer->cpumask, cpu_possible_mask);
1174 #endif
1175 buffer->cpus = nr_cpu_ids;
1176
1177 bsize = sizeof(void *) * nr_cpu_ids;
1178 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1179 GFP_KERNEL);
1180 if (!buffer->buffers)
1181 goto fail_free_cpumask;
1182
1183 for_each_buffer_cpu(buffer, cpu) {
1184 buffer->buffers[cpu] =
1185 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1186 if (!buffer->buffers[cpu])
1187 goto fail_free_buffers;
1188 }
1189
1190 #ifdef CONFIG_HOTPLUG_CPU
1191 buffer->cpu_notify.notifier_call = rb_cpu_notify;
1192 buffer->cpu_notify.priority = 0;
1193 register_cpu_notifier(&buffer->cpu_notify);
1194 #endif
1195
1196 put_online_cpus();
1197 mutex_init(&buffer->mutex);
1198
1199 return buffer;
1200
1201 fail_free_buffers:
1202 for_each_buffer_cpu(buffer, cpu) {
1203 if (buffer->buffers[cpu])
1204 rb_free_cpu_buffer(buffer->buffers[cpu]);
1205 }
1206 kfree(buffer->buffers);
1207
1208 fail_free_cpumask:
1209 free_cpumask_var(buffer->cpumask);
1210 put_online_cpus();
1211
1212 fail_free_buffer:
1213 kfree(buffer);
1214 return NULL;
1215 }
1216 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
1217
1218 /**
1219 * ring_buffer_free - free a ring buffer.
1220 * @buffer: the buffer to free.
1221 */
1222 void
1223 ring_buffer_free(struct ring_buffer *buffer)
1224 {
1225 int cpu;
1226
1227 get_online_cpus();
1228
1229 #ifdef CONFIG_HOTPLUG_CPU
1230 unregister_cpu_notifier(&buffer->cpu_notify);
1231 #endif
1232
1233 for_each_buffer_cpu(buffer, cpu)
1234 rb_free_cpu_buffer(buffer->buffers[cpu]);
1235
1236 put_online_cpus();
1237
1238 kfree(buffer->buffers);
1239 free_cpumask_var(buffer->cpumask);
1240
1241 kfree(buffer);
1242 }
1243 EXPORT_SYMBOL_GPL(ring_buffer_free);
1244
1245 void ring_buffer_set_clock(struct ring_buffer *buffer,
1246 u64 (*clock)(void))
1247 {
1248 buffer->clock = clock;
1249 }
1250
1251 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1252
1253 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1254 {
1255 return local_read(&bpage->entries) & RB_WRITE_MASK;
1256 }
1257
1258 static inline unsigned long rb_page_write(struct buffer_page *bpage)
1259 {
1260 return local_read(&bpage->write) & RB_WRITE_MASK;
1261 }
1262
1263 static int
1264 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
1265 {
1266 struct list_head *tail_page, *to_remove, *next_page;
1267 struct buffer_page *to_remove_page, *tmp_iter_page;
1268 struct buffer_page *last_page, *first_page;
1269 unsigned int nr_removed;
1270 unsigned long head_bit;
1271 int page_entries;
1272
1273 head_bit = 0;
1274
1275 raw_spin_lock_irq(&cpu_buffer->reader_lock);
1276 atomic_inc(&cpu_buffer->record_disabled);
1277 /*
1278 * We don't race with the readers since we have acquired the reader
1279 * lock. We also don't race with writers after disabling recording.
1280 * This makes it easy to figure out the first and the last page to be
1281 * removed from the list. We unlink all the pages in between including
1282 * the first and last pages. This is done in a busy loop so that we
1283 * lose the least number of traces.
1284 * The pages are freed after we restart recording and unlock readers.
1285 */
1286 tail_page = &cpu_buffer->tail_page->list;
1287
1288 /*
1289 * tail page might be on reader page, we remove the next page
1290 * from the ring buffer
1291 */
1292 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1293 tail_page = rb_list_head(tail_page->next);
1294 to_remove = tail_page;
1295
1296 /* start of pages to remove */
1297 first_page = list_entry(rb_list_head(to_remove->next),
1298 struct buffer_page, list);
1299
1300 for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1301 to_remove = rb_list_head(to_remove)->next;
1302 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1303 }
1304
1305 next_page = rb_list_head(to_remove)->next;
1306
1307 /*
1308 * Now we remove all pages between tail_page and next_page.
1309 * Make sure that we have head_bit value preserved for the
1310 * next page
1311 */
1312 tail_page->next = (struct list_head *)((unsigned long)next_page |
1313 head_bit);
1314 next_page = rb_list_head(next_page);
1315 next_page->prev = tail_page;
1316
1317 /* make sure pages points to a valid page in the ring buffer */
1318 cpu_buffer->pages = next_page;
1319
1320 /* update head page */
1321 if (head_bit)
1322 cpu_buffer->head_page = list_entry(next_page,
1323 struct buffer_page, list);
1324
1325 /*
1326 * change read pointer to make sure any read iterators reset
1327 * themselves
1328 */
1329 cpu_buffer->read = 0;
1330
1331 /* pages are removed, resume tracing and then free the pages */
1332 atomic_dec(&cpu_buffer->record_disabled);
1333 raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1334
1335 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1336
1337 /* last buffer page to remove */
1338 last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1339 list);
1340 tmp_iter_page = first_page;
1341
1342 do {
1343 to_remove_page = tmp_iter_page;
1344 rb_inc_page(cpu_buffer, &tmp_iter_page);
1345
1346 /* update the counters */
1347 page_entries = rb_page_entries(to_remove_page);
1348 if (page_entries) {
1349 /*
1350 * If something was added to this page, it was full
1351 * since it is not the tail page. So we deduct the
1352 * bytes consumed in ring buffer from here.
1353 * Increment overrun to account for the lost events.
1354 */
1355 local_add(page_entries, &cpu_buffer->overrun);
1356 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1357 }
1358
1359 /*
1360 * We have already removed references to this list item, just
1361 * free up the buffer_page and its page
1362 */
1363 free_buffer_page(to_remove_page);
1364 nr_removed--;
1365
1366 } while (to_remove_page != last_page);
1367
1368 RB_WARN_ON(cpu_buffer, nr_removed);
1369
1370 return nr_removed == 0;
1371 }
1372
1373 static int
1374 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1375 {
1376 struct list_head *pages = &cpu_buffer->new_pages;
1377 int retries, success;
1378
1379 raw_spin_lock_irq(&cpu_buffer->reader_lock);
1380 /*
1381 * We are holding the reader lock, so the reader page won't be swapped
1382 * in the ring buffer. Now we are racing with the writer trying to
1383 * move head page and the tail page.
1384 * We are going to adapt the reader page update process where:
1385 * 1. We first splice the start and end of list of new pages between
1386 * the head page and its previous page.
1387 * 2. We cmpxchg the prev_page->next to point from head page to the
1388 * start of new pages list.
1389 * 3. Finally, we update the head->prev to the end of new list.
1390 *
1391 * We will try this process 10 times, to make sure that we don't keep
1392 * spinning.
1393 */
1394 retries = 10;
1395 success = 0;
1396 while (retries--) {
1397 struct list_head *head_page, *prev_page, *r;
1398 struct list_head *last_page, *first_page;
1399 struct list_head *head_page_with_bit;
1400
1401 head_page = &rb_set_head_page(cpu_buffer)->list;
1402 if (!head_page)
1403 break;
1404 prev_page = head_page->prev;
1405
1406 first_page = pages->next;
1407 last_page = pages->prev;
1408
1409 head_page_with_bit = (struct list_head *)
1410 ((unsigned long)head_page | RB_PAGE_HEAD);
1411
1412 last_page->next = head_page_with_bit;
1413 first_page->prev = prev_page;
1414
1415 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1416
1417 if (r == head_page_with_bit) {
1418 /*
1419 * yay, we replaced the page pointer to our new list,
1420 * now, we just have to update to head page's prev
1421 * pointer to point to end of list
1422 */
1423 head_page->prev = last_page;
1424 success = 1;
1425 break;
1426 }
1427 }
1428
1429 if (success)
1430 INIT_LIST_HEAD(pages);
1431 /*
1432 * If we weren't successful in adding in new pages, warn and stop
1433 * tracing
1434 */
1435 RB_WARN_ON(cpu_buffer, !success);
1436 raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1437
1438 /* free pages if they weren't inserted */
1439 if (!success) {
1440 struct buffer_page *bpage, *tmp;
1441 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1442 list) {
1443 list_del_init(&bpage->list);
1444 free_buffer_page(bpage);
1445 }
1446 }
1447 return success;
1448 }
1449
1450 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1451 {
1452 int success;
1453
1454 if (cpu_buffer->nr_pages_to_update > 0)
1455 success = rb_insert_pages(cpu_buffer);
1456 else
1457 success = rb_remove_pages(cpu_buffer,
1458 -cpu_buffer->nr_pages_to_update);
1459
1460 if (success)
1461 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1462 }
1463
1464 static void update_pages_handler(struct work_struct *work)
1465 {
1466 struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1467 struct ring_buffer_per_cpu, update_pages_work);
1468 rb_update_pages(cpu_buffer);
1469 complete(&cpu_buffer->update_done);
1470 }
1471
1472 /**
1473 * ring_buffer_resize - resize the ring buffer
1474 * @buffer: the buffer to resize.
1475 * @size: the new size.
1476 *
1477 * Minimum size is 2 * BUF_PAGE_SIZE.
1478 *
1479 * Returns 0 on success and < 0 on failure.
1480 */
1481 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1482 int cpu_id)
1483 {
1484 struct ring_buffer_per_cpu *cpu_buffer;
1485 unsigned nr_pages;
1486 int cpu, err = 0;
1487
1488 /*
1489 * Always succeed at resizing a non-existent buffer:
1490 */
1491 if (!buffer)
1492 return size;
1493
1494 /* Make sure the requested buffer exists */
1495 if (cpu_id != RING_BUFFER_ALL_CPUS &&
1496 !cpumask_test_cpu(cpu_id, buffer->cpumask))
1497 return size;
1498
1499 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1500 size *= BUF_PAGE_SIZE;
1501
1502 /* we need a minimum of two pages */
1503 if (size < BUF_PAGE_SIZE * 2)
1504 size = BUF_PAGE_SIZE * 2;
1505
1506 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1507
1508 /*
1509 * Don't succeed if resizing is disabled, as a reader might be
1510 * manipulating the ring buffer and is expecting a sane state while
1511 * this is true.
1512 */
1513 if (atomic_read(&buffer->resize_disabled))
1514 return -EBUSY;
1515
1516 /* prevent another thread from changing buffer sizes */
1517 mutex_lock(&buffer->mutex);
1518
1519 if (cpu_id == RING_BUFFER_ALL_CPUS) {
1520 /* calculate the pages to update */
1521 for_each_buffer_cpu(buffer, cpu) {
1522 cpu_buffer = buffer->buffers[cpu];
1523
1524 cpu_buffer->nr_pages_to_update = nr_pages -
1525 cpu_buffer->nr_pages;
1526 /*
1527 * Nothing more to do if we are removing pages or there is no update
1528 */
1529 if (cpu_buffer->nr_pages_to_update <= 0)
1530 continue;
1531 /*
1532 * to add pages, make sure all new pages can be
1533 * allocated without receiving ENOMEM
1534 */
1535 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1536 if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1537 &cpu_buffer->new_pages, cpu)) {
1538 /* not enough memory for new pages */
1539 err = -ENOMEM;
1540 goto out_err;
1541 }
1542 }
1543
1544 get_online_cpus();
1545 /*
1546 * Fire off all the required work handlers
1547 * We can't schedule on offline CPUs, but it's not necessary
1548 * since we can change their buffer sizes without any race.
1549 */
1550 for_each_buffer_cpu(buffer, cpu) {
1551 cpu_buffer = buffer->buffers[cpu];
1552 if (!cpu_buffer->nr_pages_to_update)
1553 continue;
1554
1555 if (cpu_online(cpu))
1556 schedule_work_on(cpu,
1557 &cpu_buffer->update_pages_work);
1558 else
1559 rb_update_pages(cpu_buffer);
1560 }
1561
1562 /* wait for all the updates to complete */
1563 for_each_buffer_cpu(buffer, cpu) {
1564 cpu_buffer = buffer->buffers[cpu];
1565 if (!cpu_buffer->nr_pages_to_update)
1566 continue;
1567
1568 if (cpu_online(cpu))
1569 wait_for_completion(&cpu_buffer->update_done);
1570 cpu_buffer->nr_pages_to_update = 0;
1571 }
1572
1573 put_online_cpus();
1574 } else {
1575 /* Make sure this CPU has been initialized */
1576 if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
1577 goto out;
1578
1579 cpu_buffer = buffer->buffers[cpu_id];
1580
1581 if (nr_pages == cpu_buffer->nr_pages)
1582 goto out;
1583
1584 cpu_buffer->nr_pages_to_update = nr_pages -
1585 cpu_buffer->nr_pages;
1586
1587 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1588 if (cpu_buffer->nr_pages_to_update > 0 &&
1589 __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1590 &cpu_buffer->new_pages, cpu_id)) {
1591 err = -ENOMEM;
1592 goto out_err;
1593 }
1594
1595 get_online_cpus();
1596
1597 if (cpu_online(cpu_id)) {
1598 schedule_work_on(cpu_id,
1599 &cpu_buffer->update_pages_work);
1600 wait_for_completion(&cpu_buffer->update_done);
1601 } else
1602 rb_update_pages(cpu_buffer);
1603
1604 cpu_buffer->nr_pages_to_update = 0;
1605 put_online_cpus();
1606 }
1607
1608 out:
1609 /*
1610 * The ring buffer resize can happen with the ring buffer
1611 * enabled, so that the update disturbs the tracing as little
1612 * as possible. But if the buffer is disabled, we do not need
1613 * to worry about that, and we can take the time to verify
1614 * that the buffer is not corrupt.
1615 */
1616 if (atomic_read(&buffer->record_disabled)) {
1617 atomic_inc(&buffer->record_disabled);
1618 /*
1619 * Even though the buffer was disabled, we must make sure
1620 * that it is truly disabled before calling rb_check_pages.
1621 * There could have been a race between checking
1622 * record_disable and incrementing it.
1623 */
1624 synchronize_sched();
1625 for_each_buffer_cpu(buffer, cpu) {
1626 cpu_buffer = buffer->buffers[cpu];
1627 rb_check_pages(cpu_buffer);
1628 }
1629 atomic_dec(&buffer->record_disabled);
1630 }
1631
1632 mutex_unlock(&buffer->mutex);
1633 return size;
1634
1635 out_err:
1636 for_each_buffer_cpu(buffer, cpu) {
1637 struct buffer_page *bpage, *tmp;
1638
1639 cpu_buffer = buffer->buffers[cpu];
1640 cpu_buffer->nr_pages_to_update = 0;
1641
1642 if (list_empty(&cpu_buffer->new_pages))
1643 continue;
1644
1645 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1646 list) {
1647 list_del_init(&bpage->list);
1648 free_buffer_page(bpage);
1649 }
1650 }
1651 mutex_unlock(&buffer->mutex);
1652 return err;
1653 }
1654 EXPORT_SYMBOL_GPL(ring_buffer_resize);
1655
1656 void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1657 {
1658 mutex_lock(&buffer->mutex);
1659 if (val)
1660 buffer->flags |= RB_FL_OVERWRITE;
1661 else
1662 buffer->flags &= ~RB_FL_OVERWRITE;
1663 mutex_unlock(&buffer->mutex);
1664 }
1665 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1666
1667 static inline void *
1668 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1669 {
1670 return bpage->data + index;
1671 }
1672
1673 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1674 {
1675 return bpage->page->data + index;
1676 }
1677
1678 static inline struct ring_buffer_event *
1679 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1680 {
1681 return __rb_page_index(cpu_buffer->reader_page,
1682 cpu_buffer->reader_page->read);
1683 }
1684
1685 static inline struct ring_buffer_event *
1686 rb_iter_head_event(struct ring_buffer_iter *iter)
1687 {
1688 return __rb_page_index(iter->head_page, iter->head);
1689 }
1690
1691 static inline unsigned rb_page_commit(struct buffer_page *bpage)
1692 {
1693 return local_read(&bpage->page->commit);
1694 }
1695
1696 /* Size is determined by what has been committed */
1697 static inline unsigned rb_page_size(struct buffer_page *bpage)
1698 {
1699 return rb_page_commit(bpage);
1700 }
1701
1702 static inline unsigned
1703 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1704 {
1705 return rb_page_commit(cpu_buffer->commit_page);
1706 }
1707
1708 static inline unsigned
1709 rb_event_index(struct ring_buffer_event *event)
1710 {
1711 unsigned long addr = (unsigned long)event;
1712
1713 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1714 }
1715
1716 static inline int
1717 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1718 struct ring_buffer_event *event)
1719 {
1720 unsigned long addr = (unsigned long)event;
1721 unsigned long index;
1722
1723 index = rb_event_index(event);
1724 addr &= PAGE_MASK;
1725
1726 return cpu_buffer->commit_page->page == (void *)addr &&
1727 rb_commit_index(cpu_buffer) == index;
1728 }
1729
1730 static void
1731 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1732 {
1733 unsigned long max_count;
1734
1735 /*
1736 * We only race with interrupts and NMIs on this CPU.
1737 * If we own the commit event, then we can commit
1738 * all others that interrupted us, since the interruptions
1739 * are in stack format (they finish before they come
1740 * back to us). This allows us to do a simple loop to
1741 * assign the commit to the tail.
1742 */
1743 again:
1744 max_count = cpu_buffer->nr_pages * 100;
1745
1746 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1747 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1748 return;
1749 if (RB_WARN_ON(cpu_buffer,
1750 rb_is_reader_page(cpu_buffer->tail_page)))
1751 return;
1752 local_set(&cpu_buffer->commit_page->page->commit,
1753 rb_page_write(cpu_buffer->commit_page));
1754 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1755 cpu_buffer->write_stamp =
1756 cpu_buffer->commit_page->page->time_stamp;
1757 /* add barrier to keep gcc from optimizing too much */
1758 barrier();
1759 }
1760 while (rb_commit_index(cpu_buffer) !=
1761 rb_page_write(cpu_buffer->commit_page)) {
1762
1763 local_set(&cpu_buffer->commit_page->page->commit,
1764 rb_page_write(cpu_buffer->commit_page));
1765 RB_WARN_ON(cpu_buffer,
1766 local_read(&cpu_buffer->commit_page->page->commit) &
1767 ~RB_WRITE_MASK);
1768 barrier();
1769 }
1770
1771 /* again, keep gcc from optimizing */
1772 barrier();
1773
1774 /*
1775 * If an interrupt came in just after the first while loop
1776 * and pushed the tail page forward, we will be left with
1777 * a dangling commit that will never go forward.
1778 */
1779 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1780 goto again;
1781 }
1782
1783 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1784 {
1785 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1786 cpu_buffer->reader_page->read = 0;
1787 }
1788
1789 static void rb_inc_iter(struct ring_buffer_iter *iter)
1790 {
1791 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1792
1793 /*
1794 * The iterator could be on the reader page (it starts there).
1795 * But the head could have moved, since the reader was
1796 * found. Check for this case and assign the iterator
1797 * to the head page instead of next.
1798 */
1799 if (iter->head_page == cpu_buffer->reader_page)
1800 iter->head_page = rb_set_head_page(cpu_buffer);
1801 else
1802 rb_inc_page(cpu_buffer, &iter->head_page);
1803
1804 iter->read_stamp = iter->head_page->page->time_stamp;
1805 iter->head = 0;
1806 }
1807
1808 /* Slow path, do not inline */
1809 static noinline struct ring_buffer_event *
1810 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
1811 {
1812 event->type_len = RINGBUF_TYPE_TIME_EXTEND;
1813
1814 /* Not the first event on the page? */
1815 if (rb_event_index(event)) {
1816 event->time_delta = delta & TS_MASK;
1817 event->array[0] = delta >> TS_SHIFT;
1818 } else {
1819 /* nope, just zero it */
1820 event->time_delta = 0;
1821 event->array[0] = 0;
1822 }
1823
1824 return skip_time_extend(event);
1825 }
1826
1827 /**
1828 * rb_update_event - update event type and data
1829 * @event: the event to update
1830 * @type: the type of event
1831 * @length: the size of the event field in the ring buffer
1832 *
1833 * Update the type and data fields of the event. The length
1834 * is the actual size that is written to the ring buffer,
1835 * and with this, we can determine what to place into the
1836 * data field.
1837 */
1838 static void
1839 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
1840 struct ring_buffer_event *event, unsigned length,
1841 int add_timestamp, u64 delta)
1842 {
1843 /* Only a commit updates the timestamp */
1844 if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
1845 delta = 0;
1846
1847 /*
1848 * If we need to add a timestamp, then we
1849 * add it to the start of the reserved space.
1850 */
1851 if (unlikely(add_timestamp)) {
1852 event = rb_add_time_stamp(event, delta);
1853 length -= RB_LEN_TIME_EXTEND;
1854 delta = 0;
1855 }
1856
1857 event->time_delta = delta;
1858 length -= RB_EVNT_HDR_SIZE;
1859 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
1860 event->type_len = 0;
1861 event->array[0] = length;
1862 } else
1863 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
1864 }
1865
1866 /*
1867 * rb_handle_head_page - writer hit the head page
1868 *
1869 * Returns: +1 to retry page
1870 * 0 to continue
1871 * -1 on error
1872 */
1873 static int
1874 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1875 struct buffer_page *tail_page,
1876 struct buffer_page *next_page)
1877 {
1878 struct buffer_page *new_head;
1879 int entries;
1880 int type;
1881 int ret;
1882
1883 entries = rb_page_entries(next_page);
1884
1885 /*
1886 * The hard part is here. We need to move the head
1887 * forward, and protect against both readers on
1888 * other CPUs and writers coming in via interrupts.
1889 */
1890 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1891 RB_PAGE_HEAD);
1892
1893 /*
1894 * type can be one of four:
1895 * NORMAL - an interrupt already moved it for us
1896 * HEAD - we are the first to get here.
1897 * UPDATE - we are the interrupt interrupting
1898 * a current move.
1899 * MOVED - a reader on another CPU moved the next
1900 * pointer to its reader page. Give up
1901 * and try again.
1902 */
1903
1904 switch (type) {
1905 case RB_PAGE_HEAD:
1906 /*
1907 * We changed the head to UPDATE, thus
1908 * it is our responsibility to update
1909 * the counters.
1910 */
1911 local_add(entries, &cpu_buffer->overrun);
1912 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1913
1914 /*
1915 * The entries will be zeroed out when we move the
1916 * tail page.
1917 */
1918
1919 /* still more to do */
1920 break;
1921
1922 case RB_PAGE_UPDATE:
1923 /*
1924 * This is an interrupt that interrupted the
1925 * previous update. Still more to do.
1926 */
1927 break;
1928 case RB_PAGE_NORMAL:
1929 /*
1930 * An interrupt came in before the update
1931 * and processed this for us.
1932 * Nothing left to do.
1933 */
1934 return 1;
1935 case RB_PAGE_MOVED:
1936 /*
1937 * The reader is on another CPU and just did
1938 * a swap with our next_page.
1939 * Try again.
1940 */
1941 return 1;
1942 default:
1943 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1944 return -1;
1945 }
1946
1947 /*
1948 * Now that we are here, the old head pointer is
1949 * set to UPDATE. This will keep the reader from
1950 * swapping the head page with the reader page.
1951 * The reader (on another CPU) will spin till
1952 * we are finished.
1953 *
1954 * We just need to protect against interrupts
1955 * doing the job. We will set the next pointer
1956 * to HEAD. After that, we set the old pointer
1957 * to NORMAL, but only if it was HEAD before;
1958 * otherwise we are an interrupt, and only
1959 * want the outermost commit to reset it.
1960 */
1961 new_head = next_page;
1962 rb_inc_page(cpu_buffer, &new_head);
1963
1964 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1965 RB_PAGE_NORMAL);
1966
1967 /*
1968 * Valid returns are:
1969 * HEAD - an interrupt came in and already set it.
1970 * NORMAL - One of two things:
1971 * 1) We really set it.
1972 * 2) A bunch of interrupts came in and moved
1973 * the page forward again.
1974 */
1975 switch (ret) {
1976 case RB_PAGE_HEAD:
1977 case RB_PAGE_NORMAL:
1978 /* OK */
1979 break;
1980 default:
1981 RB_WARN_ON(cpu_buffer, 1);
1982 return -1;
1983 }
1984
1985 /*
1986 * It is possible that an interrupt came in,
1987 * set the head up, then more interrupts came in
1988 * and moved it again. When we get back here,
1989 * the page would have been set to NORMAL but we
1990 * just set it back to HEAD.
1991 *
1992 * How do you detect this? Well, if that happened
1993 * the tail page would have moved.
1994 */
1995 if (ret == RB_PAGE_NORMAL) {
1996 /*
1997 * If the tail had moved past next, then we need
1998 * to reset the pointer.
1999 */
2000 if (cpu_buffer->tail_page != tail_page &&
2001 cpu_buffer->tail_page != next_page)
2002 rb_head_page_set_normal(cpu_buffer, new_head,
2003 next_page,
2004 RB_PAGE_HEAD);
2005 }
2006
2007 /*
2008 * If this was the outermost commit (the one that
2009 * changed the original pointer from HEAD to UPDATE),
2010 * then it is up to us to reset it to NORMAL.
2011 */
2012 if (type == RB_PAGE_HEAD) {
2013 ret = rb_head_page_set_normal(cpu_buffer, next_page,
2014 tail_page,
2015 RB_PAGE_UPDATE);
2016 if (RB_WARN_ON(cpu_buffer,
2017 ret != RB_PAGE_UPDATE))
2018 return -1;
2019 }
2020
2021 return 0;
2022 }
2023
2024 static unsigned rb_calculate_event_length(unsigned length)
2025 {
2026 struct ring_buffer_event event; /* Used only for sizeof array */
2027
2028 /* zero length can cause confusion */
2029 if (!length)
2030 length = 1;
2031
2032 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2033 length += sizeof(event.array[0]);
2034
2035 length += RB_EVNT_HDR_SIZE;
2036 length = ALIGN(length, RB_ARCH_ALIGNMENT);
2037
2038 return length;
2039 }
2040
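/*
 * Editorial worked example, not part of the original source. Assuming the
 * definitions earlier in this file give a 4-byte RB_EVNT_HDR_SIZE, a 4-byte
 * RB_ARCH_ALIGNMENT and no RB_FORCE_8BYTE_ALIGNMENT: a 10-byte request fits
 * in type_len (10 <= RB_MAX_SMALL_DATA), so nothing is added for array[0];
 * the header brings it to 10 + 4 = 14, and the final ALIGN() rounds the
 * reservation up to 16 bytes.
 */
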
2041 static inline void
2042 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2043 struct buffer_page *tail_page,
2044 unsigned long tail, unsigned long length)
2045 {
2046 struct ring_buffer_event *event;
2047
2048 /*
2049 * Only the event that crossed the page boundary
2050 * must fill the old tail_page with padding.
2051 */
2052 if (tail >= BUF_PAGE_SIZE) {
2053 /*
2054 * If the page was filled, then we still need
2055 * to update the real_end. Reset it to zero
2056 * and the reader will ignore it.
2057 */
2058 if (tail == BUF_PAGE_SIZE)
2059 tail_page->real_end = 0;
2060
2061 local_sub(length, &tail_page->write);
2062 return;
2063 }
2064
2065 event = __rb_page_index(tail_page, tail);
2066 kmemcheck_annotate_bitfield(event, bitfield);
2067
2068 /* account for padding bytes */
2069 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2070
2071 /*
2072 * Save the original length to the meta data.
2073 * This will be used by the reader to update the lost
2074 * event counter.
2075 */
2076 tail_page->real_end = tail;
2077
2078 /*
2079 * If this event is bigger than the minimum size, then
2080 * we need to be careful that we don't subtract the
2081 * write counter enough to allow another writer to slip
2082 * in on this page.
2083 * We put in a discarded commit instead, to make sure
2084 * that this space is not used again.
2085 *
2086 * If we are less than the minimum size, we don't need to
2087 * worry about it.
2088 */
2089 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2090 /* No room for any events */
2091
2092 /* Mark the rest of the page with padding */
2093 rb_event_set_padding(event);
2094
2095 /* Set the write back to the previous setting */
2096 local_sub(length, &tail_page->write);
2097 return;
2098 }
2099
2100 /* Put in a discarded event */
2101 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2102 event->type_len = RINGBUF_TYPE_PADDING;
2103 /* time delta must be non zero */
2104 event->time_delta = 1;
2105
2106 /* Set write to end of buffer */
2107 length = (tail + length) - BUF_PAGE_SIZE;
2108 local_sub(length, &tail_page->write);
2109 }
2110
2111 /*
2112 * This is the slow path, force gcc not to inline it.
2113 */
2114 static noinline struct ring_buffer_event *
2115 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2116 unsigned long length, unsigned long tail,
2117 struct buffer_page *tail_page, u64 ts)
2118 {
2119 struct buffer_page *commit_page = cpu_buffer->commit_page;
2120 struct ring_buffer *buffer = cpu_buffer->buffer;
2121 struct buffer_page *next_page;
2122 int ret;
2123
2124 next_page = tail_page;
2125
2126 rb_inc_page(cpu_buffer, &next_page);
2127
2128 /*
2129 * If for some reason, we had an interrupt storm that made
2130 * it all the way around the buffer, bail, and warn
2131 * about it.
2132 */
2133 if (unlikely(next_page == commit_page)) {
2134 local_inc(&cpu_buffer->commit_overrun);
2135 goto out_reset;
2136 }
2137
2138 /*
2139 * This is where the fun begins!
2140 *
2141 * We are fighting against races between a reader that
2142 * could be on another CPU trying to swap its reader
2143 * page with the buffer head.
2144 *
2145 * We are also fighting against interrupts coming in and
2146 * moving the head or tail on us as well.
2147 *
2148 * If the next page is the head page then we have filled
2149 * the buffer, unless the commit page is still on the
2150 * reader page.
2151 */
2152 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2153
2154 /*
2155 * If the commit is not on the reader page, then
2156 * move the header page.
2157 */
2158 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2159 /*
2160 * If we are not in overwrite mode,
2161 * this is easy, just stop here.
2162 */
2163 if (!(buffer->flags & RB_FL_OVERWRITE)) {
2164 local_inc(&cpu_buffer->dropped_events);
2165 goto out_reset;
2166 }
2167
2168 ret = rb_handle_head_page(cpu_buffer,
2169 tail_page,
2170 next_page);
2171 if (ret < 0)
2172 goto out_reset;
2173 if (ret)
2174 goto out_again;
2175 } else {
2176 /*
2177 * We need to be careful here too. The
2178 * commit page could still be on the reader
2179 * page. We could have a small buffer, and
2180 * have filled up the buffer with events
2181 * from interrupts and such, and wrapped.
2182 *
2183 * Note, if the tail page is also on the
2184 * reader_page, we let it move out.
2185 */
2186 if (unlikely((cpu_buffer->commit_page !=
2187 cpu_buffer->tail_page) &&
2188 (cpu_buffer->commit_page ==
2189 cpu_buffer->reader_page))) {
2190 local_inc(&cpu_buffer->commit_overrun);
2191 goto out_reset;
2192 }
2193 }
2194 }
2195
2196 ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
2197 if (ret) {
2198 /*
2199 * Nested commits always have zero deltas, so
2200 * just reread the time stamp
2201 */
2202 ts = rb_time_stamp(buffer);
2203 next_page->page->time_stamp = ts;
2204 }
2205
2206 out_again:
2207
2208 rb_reset_tail(cpu_buffer, tail_page, tail, length);
2209
2210 /* fail and let the caller try again */
2211 return ERR_PTR(-EAGAIN);
2212
2213 out_reset:
2214 /* reset write */
2215 rb_reset_tail(cpu_buffer, tail_page, tail, length);
2216
2217 return NULL;
2218 }
2219
2220 static struct ring_buffer_event *
2221 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2222 unsigned long length, u64 ts,
2223 u64 delta, int add_timestamp)
2224 {
2225 struct buffer_page *tail_page;
2226 struct ring_buffer_event *event;
2227 unsigned long tail, write;
2228
2229 /*
2230 * If the time delta since the last event is too big to
2231 * hold in the time field of the event, then we append a
2232 * TIME EXTEND event ahead of the data event.
2233 */
2234 if (unlikely(add_timestamp))
2235 length += RB_LEN_TIME_EXTEND;
2236
2237 tail_page = cpu_buffer->tail_page;
2238 write = local_add_return(length, &tail_page->write);
2239
2240 /* set write to only the index of the write */
2241 write &= RB_WRITE_MASK;
2242 tail = write - length;
2243
2244 /* See if we shot past the end of this buffer page */
2245 if (unlikely(write > BUF_PAGE_SIZE))
2246 return rb_move_tail(cpu_buffer, length, tail,
2247 tail_page, ts);
2248
2249 /* We reserved something on the buffer */
2250
2251 event = __rb_page_index(tail_page, tail);
2252 kmemcheck_annotate_bitfield(event, bitfield);
2253 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
2254
2255 local_inc(&tail_page->entries);
2256
2257 /*
2258 * If this is the first commit on the page, then update
2259 * its timestamp.
2260 */
2261 if (!tail)
2262 tail_page->page->time_stamp = ts;
2263
2264 /* account for these added bytes */
2265 local_add(length, &cpu_buffer->entries_bytes);
2266
2267 return event;
2268 }
2269
2270 static inline int
2271 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2272 struct ring_buffer_event *event)
2273 {
2274 unsigned long new_index, old_index;
2275 struct buffer_page *bpage;
2276 unsigned long index;
2277 unsigned long addr;
2278
2279 new_index = rb_event_index(event);
2280 old_index = new_index + rb_event_ts_length(event);
2281 addr = (unsigned long)event;
2282 addr &= PAGE_MASK;
2283
2284 bpage = cpu_buffer->tail_page;
2285
2286 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2287 unsigned long write_mask =
2288 local_read(&bpage->write) & ~RB_WRITE_MASK;
2289 unsigned long event_length = rb_event_length(event);
2290 /*
2291 * This is on the tail page. It is possible that
2292 * a write could come in and move the tail page
2293 * and write to the next page. That is fine
2294 * because we just shorten what is on this page.
2295 */
2296 old_index += write_mask;
2297 new_index += write_mask;
2298 index = local_cmpxchg(&bpage->write, old_index, new_index);
2299 if (index == old_index) {
2300 /* update counters */
2301 local_sub(event_length, &cpu_buffer->entries_bytes);
2302 return 1;
2303 }
2304 }
2305
2306 /* could not discard */
2307 return 0;
2308 }
2309
2310 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2311 {
2312 local_inc(&cpu_buffer->committing);
2313 local_inc(&cpu_buffer->commits);
2314 }
2315
2316 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2317 {
2318 unsigned long commits;
2319
2320 if (RB_WARN_ON(cpu_buffer,
2321 !local_read(&cpu_buffer->committing)))
2322 return;
2323
2324 again:
2325 commits = local_read(&cpu_buffer->commits);
2326 /* synchronize with interrupts */
2327 barrier();
2328 if (local_read(&cpu_buffer->committing) == 1)
2329 rb_set_commit_to_write(cpu_buffer);
2330
2331 local_dec(&cpu_buffer->committing);
2332
2333 /* synchronize with interrupts */
2334 barrier();
2335
2336 /*
2337 * Need to account for interrupts coming in between the
2338 * updating of the commit page and the clearing of the
2339 * committing counter.
2340 */
2341 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2342 !local_read(&cpu_buffer->committing)) {
2343 local_inc(&cpu_buffer->committing);
2344 goto again;
2345 }
2346 }
2347
2348 static struct ring_buffer_event *
2349 rb_reserve_next_event(struct ring_buffer *buffer,
2350 struct ring_buffer_per_cpu *cpu_buffer,
2351 unsigned long length)
2352 {
2353 struct ring_buffer_event *event;
2354 u64 ts, delta;
2355 int nr_loops = 0;
2356 int add_timestamp;
2357 u64 diff;
2358
2359 rb_start_commit(cpu_buffer);
2360
2361 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2362 /*
2363 * Due to the ability to swap a cpu buffer out of a buffer,
2364 * it is possible it was swapped before we committed.
2365 * (committing stops a swap). We check for it here and
2366 * if it happened, we have to fail the write.
2367 */
2368 barrier();
2369 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2370 local_dec(&cpu_buffer->committing);
2371 local_dec(&cpu_buffer->commits);
2372 return NULL;
2373 }
2374 #endif
2375
2376 length = rb_calculate_event_length(length);
2377 again:
2378 add_timestamp = 0;
2379 delta = 0;
2380
2381 /*
2382 * We allow for interrupts to reenter here and do a trace.
2383 * If one does, it will cause this original code to loop
2384 * back here. Even with heavy interrupts happening, this
2385 * should only happen a few times in a row. If this happens
2386 * 1000 times in a row, there must be either an interrupt
2387 * storm or we have something buggy.
2388 * Bail!
2389 */
2390 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2391 goto out_fail;
2392
2393 ts = rb_time_stamp(cpu_buffer->buffer);
2394 diff = ts - cpu_buffer->write_stamp;
2395
2396 /* make sure this diff is calculated here */
2397 barrier();
2398
2399 /* Did the write stamp get updated already? */
2400 if (likely(ts >= cpu_buffer->write_stamp)) {
2401 delta = diff;
2402 if (unlikely(test_time_stamp(delta))) {
2403 int local_clock_stable = 1;
2404 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2405 local_clock_stable = sched_clock_stable;
2406 #endif
2407 WARN_ONCE(delta > (1ULL << 59),
2408 KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2409 (unsigned long long)delta,
2410 (unsigned long long)ts,
2411 (unsigned long long)cpu_buffer->write_stamp,
2412 local_clock_stable ? "" :
2413 "If you just came from a suspend/resume,\n"
2414 "please switch to the trace global clock:\n"
2415 " echo global > /sys/kernel/debug/tracing/trace_clock\n");
2416 add_timestamp = 1;
2417 }
2418 }
2419
2420 event = __rb_reserve_next(cpu_buffer, length, ts,
2421 delta, add_timestamp);
2422 if (unlikely(PTR_ERR(event) == -EAGAIN))
2423 goto again;
2424
2425 if (!event)
2426 goto out_fail;
2427
2428 return event;
2429
2430 out_fail:
2431 rb_end_commit(cpu_buffer);
2432 return NULL;
2433 }
2434
2435 #ifdef CONFIG_TRACING
2436
2437 #define TRACE_RECURSIVE_DEPTH 16
2438
2439 /* Keep this code out of the fast path cache */
2440 static noinline void trace_recursive_fail(void)
2441 {
2442 /* Disable all tracing before we do anything else */
2443 tracing_off_permanent();
2444
2445 printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
2446 "HC[%lu]:SC[%lu]:NMI[%lu]\n",
2447 trace_recursion_buffer(),
2448 hardirq_count() >> HARDIRQ_SHIFT,
2449 softirq_count() >> SOFTIRQ_SHIFT,
2450 in_nmi());
2451
2452 WARN_ON_ONCE(1);
2453 }
2454
2455 static inline int trace_recursive_lock(void)
2456 {
2457 trace_recursion_inc();
2458
2459 if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
2460 return 0;
2461
2462 trace_recursive_fail();
2463
2464 return -1;
2465 }
2466
2467 static inline void trace_recursive_unlock(void)
2468 {
2469 WARN_ON_ONCE(!trace_recursion_buffer());
2470
2471 trace_recursion_dec();
2472 }
2473
2474 #else
2475
2476 #define trace_recursive_lock() (0)
2477 #define trace_recursive_unlock() do { } while (0)
2478
2479 #endif
2480
2481 /**
2482 * ring_buffer_lock_reserve - reserve a part of the buffer
2483 * @buffer: the ring buffer to reserve from
2484 * @length: the length of the data to reserve (excluding event header)
2485 *
2486 * Returns a reserved event on the ring buffer to copy directly to.
2487 * The user of this interface will need to get the body to write into
2488 * and can use the ring_buffer_event_data() interface.
2489 *
2490 * The length is the length of the data needed, not the event length
2491 * which also includes the event header.
2492 *
2493 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2494 * If NULL is returned, then nothing has been allocated or locked.
2495 */
2496 struct ring_buffer_event *
2497 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2498 {
2499 struct ring_buffer_per_cpu *cpu_buffer;
2500 struct ring_buffer_event *event;
2501 int cpu;
2502
2503 if (ring_buffer_flags != RB_BUFFERS_ON)
2504 return NULL;
2505
2506 /* If we are tracing schedule, we don't want to recurse */
2507 preempt_disable_notrace();
2508
2509 if (atomic_read(&buffer->record_disabled))
2510 goto out_nocheck;
2511
2512 if (trace_recursive_lock())
2513 goto out_nocheck;
2514
2515 cpu = raw_smp_processor_id();
2516
2517 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2518 goto out;
2519
2520 cpu_buffer = buffer->buffers[cpu];
2521
2522 if (atomic_read(&cpu_buffer->record_disabled))
2523 goto out;
2524
2525 if (length > BUF_MAX_DATA_SIZE)
2526 goto out;
2527
2528 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2529 if (!event)
2530 goto out;
2531
2532 return event;
2533
2534 out:
2535 trace_recursive_unlock();
2536
2537 out_nocheck:
2538 preempt_enable_notrace();
2539 return NULL;
2540 }
2541 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
2542
2543 static void
2544 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2545 struct ring_buffer_event *event)
2546 {
2547 u64 delta;
2548
2549 /*
2550 * The event first in the commit queue updates the
2551 * time stamp.
2552 */
2553 if (rb_event_is_commit(cpu_buffer, event)) {
2554 /*
2555 * A commit event that is first on a page
2556 * updates the write timestamp with the page stamp
2557 */
2558 if (!rb_event_index(event))
2559 cpu_buffer->write_stamp =
2560 cpu_buffer->commit_page->page->time_stamp;
2561 else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2562 delta = event->array[0];
2563 delta <<= TS_SHIFT;
2564 delta += event->time_delta;
2565 cpu_buffer->write_stamp += delta;
2566 } else
2567 cpu_buffer->write_stamp += event->time_delta;
2568 }
2569 }
2570
2571 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2572 struct ring_buffer_event *event)
2573 {
2574 local_inc(&cpu_buffer->entries);
2575 rb_update_write_stamp(cpu_buffer, event);
2576 rb_end_commit(cpu_buffer);
2577 }
2578
2579 /**
2580 * ring_buffer_unlock_commit - commit a reserved event
2581 * @buffer: The buffer to commit to
2582 * @event: The event pointer to commit.
2583 *
2584 * This commits the data to the ring buffer, and releases any locks held.
2585 *
2586 * Must be paired with ring_buffer_lock_reserve.
2587 */
2588 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2589 struct ring_buffer_event *event)
2590 {
2591 struct ring_buffer_per_cpu *cpu_buffer;
2592 int cpu = raw_smp_processor_id();
2593
2594 cpu_buffer = buffer->buffers[cpu];
2595
2596 rb_commit(cpu_buffer, event);
2597
2598 trace_recursive_unlock();
2599
2600 preempt_enable_notrace();
2601
2602 return 0;
2603 }
2604 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2605
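/*
 * Editorial usage sketch, not part of the original source: the typical
 * reserve/fill/commit sequence described in the comments above. Only the
 * ring buffer calls are real API; the helper name and its error handling
 * are illustrative.
 */
static __maybe_unused int rb_example_reserve_commit(struct ring_buffer *buffer,
						    const void *data,
						    unsigned long len)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (!event)
		return -EBUSY;

	memcpy(ring_buffer_event_data(event), data, len);

	return ring_buffer_unlock_commit(buffer, event);
}

/*
 * Note that ring_buffer_write(), defined below, performs this same sequence
 * as a single call when the data is already in hand.
 */
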
2606 static inline void rb_event_discard(struct ring_buffer_event *event)
2607 {
2608 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2609 event = skip_time_extend(event);
2610
2611 /* array[0] holds the actual length for the discarded event */
2612 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2613 event->type_len = RINGBUF_TYPE_PADDING;
2614 /* time delta must be non zero */
2615 if (!event->time_delta)
2616 event->time_delta = 1;
2617 }
2618
2619 /*
2620 * Decrement the entry count of the page that an event is on.
2621 * The event does not even need to exist, only the pointer
2622 * to the page it is on. This may only be called before the commit
2623 * takes place.
2624 */
2625 static inline void
2626 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2627 struct ring_buffer_event *event)
2628 {
2629 unsigned long addr = (unsigned long)event;
2630 struct buffer_page *bpage = cpu_buffer->commit_page;
2631 struct buffer_page *start;
2632
2633 addr &= PAGE_MASK;
2634
2635 /* Do the likely case first */
2636 if (likely(bpage->page == (void *)addr)) {
2637 local_dec(&bpage->entries);
2638 return;
2639 }
2640
2641 /*
2642 * Because the commit page may be on the reader page, we
2643 * start with the next page and end the loop when we come back to it.
2644 */
2645 rb_inc_page(cpu_buffer, &bpage);
2646 start = bpage;
2647 do {
2648 if (bpage->page == (void *)addr) {
2649 local_dec(&bpage->entries);
2650 return;
2651 }
2652 rb_inc_page(cpu_buffer, &bpage);
2653 } while (bpage != start);
2654
2655 /* commit not part of this buffer?? */
2656 RB_WARN_ON(cpu_buffer, 1);
2657 }
2658
2659 /**
2660 * ring_buffer_commit_discard - discard an event that has not been committed
2661 * @buffer: the ring buffer
2662 * @event: non committed event to discard
2663 *
2664 * Sometimes an event that is in the ring buffer needs to be ignored.
2665 * This function lets the user discard an event in the ring buffer
2666 * and then that event will not be read later.
2667 *
2668 * This function only works if it is called before the item has been
2669 * committed. It will try to free the event from the ring buffer
2670 * if another event has not been added behind it.
2671 *
2672 * If another event has been added behind it, it will set the event
2673 * up as discarded, and perform the commit.
2674 *
2675 * If this function is called, do not call ring_buffer_unlock_commit on
2676 * the event.
2677 */
2678 void ring_buffer_discard_commit(struct ring_buffer *buffer,
2679 struct ring_buffer_event *event)
2680 {
2681 struct ring_buffer_per_cpu *cpu_buffer;
2682 int cpu;
2683
2684 /* The event is discarded regardless */
2685 rb_event_discard(event);
2686
2687 cpu = smp_processor_id();
2688 cpu_buffer = buffer->buffers[cpu];
2689
2690 /*
2691 * This must only be called if the event has not been
2692 * committed yet. Thus we can assume that preemption
2693 * is still disabled.
2694 */
2695 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2696
2697 rb_decrement_entry(cpu_buffer, event);
2698 if (rb_try_to_discard(cpu_buffer, event))
2699 goto out;
2700
2701 /*
2702 * The commit is still visible to the reader, so we
2703 * must still update the timestamp.
2704 */
2705 rb_update_write_stamp(cpu_buffer, event);
2706 out:
2707 rb_end_commit(cpu_buffer);
2708
2709 trace_recursive_unlock();
2710
2711 preempt_enable_notrace();
2712
2713 }
2714 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
2715
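/*
 * Editorial sketch, not part of the original source: discarding a reserved
 * event instead of committing it, per the kernel-doc above. "keep" stands in
 * for whatever condition a real caller would use; only the ring buffer calls
 * are real API.
 */
static __maybe_unused void rb_example_maybe_discard(struct ring_buffer *buffer,
						    const void *data,
						    unsigned long len, bool keep)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (!event)
		return;

	memcpy(ring_buffer_event_data(event), data, len);

	if (keep)
		ring_buffer_unlock_commit(buffer, event);
	else
		ring_buffer_discard_commit(buffer, event);
}
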
2716 /**
2717 * ring_buffer_write - write data to the buffer without reserving
2718 * @buffer: The ring buffer to write to.
2719 * @length: The length of the data being written (excluding the event header)
2720 * @data: The data to write to the buffer.
2721 *
2722 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2723 * one function. If you already have the data to write to the buffer, it
2724 * may be easier to simply call this function.
2725 *
2726 * Note, like ring_buffer_lock_reserve, the length is the length of the data
2727 * and not the length of the event which would hold the header.
2728 */
2729 int ring_buffer_write(struct ring_buffer *buffer,
2730 unsigned long length,
2731 void *data)
2732 {
2733 struct ring_buffer_per_cpu *cpu_buffer;
2734 struct ring_buffer_event *event;
2735 void *body;
2736 int ret = -EBUSY;
2737 int cpu;
2738
2739 if (ring_buffer_flags != RB_BUFFERS_ON)
2740 return -EBUSY;
2741
2742 preempt_disable_notrace();
2743
2744 if (atomic_read(&buffer->record_disabled))
2745 goto out;
2746
2747 cpu = raw_smp_processor_id();
2748
2749 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2750 goto out;
2751
2752 cpu_buffer = buffer->buffers[cpu];
2753
2754 if (atomic_read(&cpu_buffer->record_disabled))
2755 goto out;
2756
2757 if (length > BUF_MAX_DATA_SIZE)
2758 goto out;
2759
2760 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2761 if (!event)
2762 goto out;
2763
2764 body = rb_event_data(event);
2765
2766 memcpy(body, data, length);
2767
2768 rb_commit(cpu_buffer, event);
2769
2770 ret = 0;
2771 out:
2772 preempt_enable_notrace();
2773
2774 return ret;
2775 }
2776 EXPORT_SYMBOL_GPL(ring_buffer_write);
2777
2778 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
2779 {
2780 struct buffer_page *reader = cpu_buffer->reader_page;
2781 struct buffer_page *head = rb_set_head_page(cpu_buffer);
2782 struct buffer_page *commit = cpu_buffer->commit_page;
2783
2784 /* In case of error, head will be NULL */
2785 if (unlikely(!head))
2786 return 1;
2787
2788 return reader->read == rb_page_commit(reader) &&
2789 (commit == reader ||
2790 (commit == head &&
2791 head->read == rb_page_commit(commit)));
2792 }
2793
2794 /**
2795 * ring_buffer_record_disable - stop all writes into the buffer
2796 * @buffer: The ring buffer to stop writes to.
2797 *
2798 * This prevents all writes to the buffer. Any attempt to write
2799 * to the buffer after this will fail and return NULL.
2800 *
2801 * The caller should call synchronize_sched() after this.
2802 */
2803 void ring_buffer_record_disable(struct ring_buffer *buffer)
2804 {
2805 atomic_inc(&buffer->record_disabled);
2806 }
2807 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
2808
2809 /**
2810 * ring_buffer_record_enable - enable writes to the buffer
2811 * @buffer: The ring buffer to enable writes
2812 *
2813 * Note, multiple disables will need the same number of enables
2814 * to truly enable the writing (much like preempt_disable).
2815 */
2816 void ring_buffer_record_enable(struct ring_buffer *buffer)
2817 {
2818 atomic_dec(&buffer->record_disabled);
2819 }
2820 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
2821
2822 /**
2823 * ring_buffer_record_off - stop all writes into the buffer
2824 * @buffer: The ring buffer to stop writes to.
2825 *
2826 * This prevents all writes to the buffer. Any attempt to write
2827 * to the buffer after this will fail and return NULL.
2828 *
2829 * This is different from ring_buffer_record_disable() as
2830 * it works like an on/off switch, whereas the disable() version
2831 * must be paired with an enable().
2832 */
2833 void ring_buffer_record_off(struct ring_buffer *buffer)
2834 {
2835 unsigned int rd;
2836 unsigned int new_rd;
2837
2838 do {
2839 rd = atomic_read(&buffer->record_disabled);
2840 new_rd = rd | RB_BUFFER_OFF;
2841 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
2842 }
2843 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
2844
2845 /**
2846 * ring_buffer_record_on - restart writes into the buffer
2847 * @buffer: The ring buffer to start writes to.
2848 *
2849 * This enables all writes to the buffer that was disabled by
2850 * ring_buffer_record_off().
2851 *
2852 * This is different from ring_buffer_record_enable() as
2853 * it works like an on/off switch, whereas the enable() version
2854 * must be paired with a disable().
2855 */
2856 void ring_buffer_record_on(struct ring_buffer *buffer)
2857 {
2858 unsigned int rd;
2859 unsigned int new_rd;
2860
2861 do {
2862 rd = atomic_read(&buffer->record_disabled);
2863 new_rd = rd & ~RB_BUFFER_OFF;
2864 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
2865 }
2866 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
2867
2868 /**
2869 * ring_buffer_record_is_on - return true if the ring buffer can write
2870 * @buffer: The ring buffer to see if write is enabled
2871 *
2872 * Returns true if the ring buffer is in a state that it accepts writes.
2873 */
2874 int ring_buffer_record_is_on(struct ring_buffer *buffer)
2875 {
2876 return !atomic_read(&buffer->record_disabled);
2877 }
2878
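/*
 * Editorial sketch, not part of the original source: contrasting the
 * on/off switch above with the nested disable/enable counters. It only
 * strings together calls defined in this file.
 */
static __maybe_unused void rb_example_pause_writers(struct ring_buffer *buffer)
{
	/* Nested form: every disable must be balanced by an enable */
	ring_buffer_record_disable(buffer);
	synchronize_sched();		/* wait for in-flight writers */
	/* ... inspect the buffer here ... */
	ring_buffer_record_enable(buffer);

	/* Switch form: a single call flips it, no balancing required */
	ring_buffer_record_off(buffer);
	/* ... */
	ring_buffer_record_on(buffer);
}
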
2879 /**
2880 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
2881 * @buffer: The ring buffer to stop writes to.
2882 * @cpu: The CPU buffer to stop
2883 *
2884 * This prevents all writes to the buffer. Any attempt to write
2885 * to the buffer after this will fail and return NULL.
2886 *
2887 * The caller should call synchronize_sched() after this.
2888 */
2889 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
2890 {
2891 struct ring_buffer_per_cpu *cpu_buffer;
2892
2893 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2894 return;
2895
2896 cpu_buffer = buffer->buffers[cpu];
2897 atomic_inc(&cpu_buffer->record_disabled);
2898 }
2899 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
2900
2901 /**
2902 * ring_buffer_record_enable_cpu - enable writes to the buffer
2903 * @buffer: The ring buffer to enable writes
2904 * @cpu: The CPU to enable.
2905 *
2906 * Note, multiple disables will need the same number of enables
2907 * to truly enable the writing (much like preempt_disable).
2908 */
2909 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
2910 {
2911 struct ring_buffer_per_cpu *cpu_buffer;
2912
2913 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2914 return;
2915
2916 cpu_buffer = buffer->buffers[cpu];
2917 atomic_dec(&cpu_buffer->record_disabled);
2918 }
2919 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
2920
2921 /*
2922 * The total entries in the ring buffer is the running counter
2923 * of entries entered into the ring buffer, minus the sum of
2924 * the entries read from the ring buffer and the number of
2925 * entries that were overwritten.
2926 */
2927 static inline unsigned long
2928 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
2929 {
2930 return local_read(&cpu_buffer->entries) -
2931 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
2932 }
2933
2934 /**
2935 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
2936 * @buffer: The ring buffer
2937 * @cpu: The per CPU buffer to read from.
2938 */
2939 u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
2940 {
2941 unsigned long flags;
2942 struct ring_buffer_per_cpu *cpu_buffer;
2943 struct buffer_page *bpage;
2944 u64 ret = 0;
2945
2946 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2947 return 0;
2948
2949 cpu_buffer = buffer->buffers[cpu];
2950 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2951 /*
2952 * if the tail is on reader_page, oldest time stamp is on the reader
2953 * page
2954 */
2955 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
2956 bpage = cpu_buffer->reader_page;
2957 else
2958 bpage = rb_set_head_page(cpu_buffer);
2959 if (bpage)
2960 ret = bpage->page->time_stamp;
2961 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2962
2963 return ret;
2964 }
2965 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
2966
2967 /**
2968 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
2969 * @buffer: The ring buffer
2970 * @cpu: The per CPU buffer to read from.
2971 */
2972 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
2973 {
2974 struct ring_buffer_per_cpu *cpu_buffer;
2975 unsigned long ret;
2976
2977 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2978 return 0;
2979
2980 cpu_buffer = buffer->buffers[cpu];
2981 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
2982
2983 return ret;
2984 }
2985 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
2986
2987 /**
2988 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
2989 * @buffer: The ring buffer
2990 * @cpu: The per CPU buffer to get the entries from.
2991 */
2992 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
2993 {
2994 struct ring_buffer_per_cpu *cpu_buffer;
2995
2996 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2997 return 0;
2998
2999 cpu_buffer = buffer->buffers[cpu];
3000
3001 return rb_num_of_entries(cpu_buffer);
3002 }
3003 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3004
3005 /**
3006 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3007 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3008 * @buffer: The ring buffer
3009 * @cpu: The per CPU buffer to get the number of overruns from
3010 */
3011 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3012 {
3013 struct ring_buffer_per_cpu *cpu_buffer;
3014 unsigned long ret;
3015
3016 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3017 return 0;
3018
3019 cpu_buffer = buffer->buffers[cpu];
3020 ret = local_read(&cpu_buffer->overrun);
3021
3022 return ret;
3023 }
3024 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3025
3026 /**
3027 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3028 * commits failing due to the buffer wrapping around while there are uncommitted
3029 * events, such as during an interrupt storm.
3030 * @buffer: The ring buffer
3031 * @cpu: The per CPU buffer to get the number of overruns from
3032 */
3033 unsigned long
3034 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3035 {
3036 struct ring_buffer_per_cpu *cpu_buffer;
3037 unsigned long ret;
3038
3039 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3040 return 0;
3041
3042 cpu_buffer = buffer->buffers[cpu];
3043 ret = local_read(&cpu_buffer->commit_overrun);
3044
3045 return ret;
3046 }
3047 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3048
3049 /**
3050 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3051 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3052 * @buffer: The ring buffer
3053 * @cpu: The per CPU buffer to get the number of dropped events from
3054 */
3055 unsigned long
3056 ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3057 {
3058 struct ring_buffer_per_cpu *cpu_buffer;
3059 unsigned long ret;
3060
3061 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3062 return 0;
3063
3064 cpu_buffer = buffer->buffers[cpu];
3065 ret = local_read(&cpu_buffer->dropped_events);
3066
3067 return ret;
3068 }
3069 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3070
3071 /**
3072 * ring_buffer_entries - get the number of entries in a buffer
3073 * @buffer: The ring buffer
3074 *
3075 * Returns the total number of entries in the ring buffer
3076 * (all CPU entries)
3077 */
3078 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3079 {
3080 struct ring_buffer_per_cpu *cpu_buffer;
3081 unsigned long entries = 0;
3082 int cpu;
3083
3084 /* if you care about this being correct, lock the buffer */
3085 for_each_buffer_cpu(buffer, cpu) {
3086 cpu_buffer = buffer->buffers[cpu];
3087 entries += rb_num_of_entries(cpu_buffer);
3088 }
3089
3090 return entries;
3091 }
3092 EXPORT_SYMBOL_GPL(ring_buffer_entries);
3093
3094 /**
3095 * ring_buffer_overruns - get the number of overruns in buffer
3096 * @buffer: The ring buffer
3097 *
3098 * Returns the total number of overruns in the ring buffer
3099 * (all CPU entries)
3100 */
3101 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3102 {
3103 struct ring_buffer_per_cpu *cpu_buffer;
3104 unsigned long overruns = 0;
3105 int cpu;
3106
3107 /* if you care about this being correct, lock the buffer */
3108 for_each_buffer_cpu(buffer, cpu) {
3109 cpu_buffer = buffer->buffers[cpu];
3110 overruns += local_read(&cpu_buffer->overrun);
3111 }
3112
3113 return overruns;
3114 }
3115 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
3116
3117 static void rb_iter_reset(struct ring_buffer_iter *iter)
3118 {
3119 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3120
3121 /* Iterator usage is expected to have recording disabled */
3122 if (list_empty(&cpu_buffer->reader_page->list)) {
3123 iter->head_page = rb_set_head_page(cpu_buffer);
3124 if (unlikely(!iter->head_page))
3125 return;
3126 iter->head = iter->head_page->read;
3127 } else {
3128 iter->head_page = cpu_buffer->reader_page;
3129 iter->head = cpu_buffer->reader_page->read;
3130 }
3131 if (iter->head)
3132 iter->read_stamp = cpu_buffer->read_stamp;
3133 else
3134 iter->read_stamp = iter->head_page->page->time_stamp;
3135 iter->cache_reader_page = cpu_buffer->reader_page;
3136 iter->cache_read = cpu_buffer->read;
3137 }
3138
3139 /**
3140 * ring_buffer_iter_reset - reset an iterator
3141 * @iter: The iterator to reset
3142 *
3143 * Resets the iterator, so that it will start from the beginning
3144 * again.
3145 */
3146 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3147 {
3148 struct ring_buffer_per_cpu *cpu_buffer;
3149 unsigned long flags;
3150
3151 if (!iter)
3152 return;
3153
3154 cpu_buffer = iter->cpu_buffer;
3155
3156 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3157 rb_iter_reset(iter);
3158 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3159 }
3160 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3161
3162 /**
3163 * ring_buffer_iter_empty - check if an iterator has no more to read
3164 * @iter: The iterator to check
3165 */
3166 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3167 {
3168 struct ring_buffer_per_cpu *cpu_buffer;
3169
3170 cpu_buffer = iter->cpu_buffer;
3171
3172 return iter->head_page == cpu_buffer->commit_page &&
3173 iter->head == rb_commit_index(cpu_buffer);
3174 }
3175 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3176
3177 static void
3178 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3179 struct ring_buffer_event *event)
3180 {
3181 u64 delta;
3182
3183 switch (event->type_len) {
3184 case RINGBUF_TYPE_PADDING:
3185 return;
3186
3187 case RINGBUF_TYPE_TIME_EXTEND:
3188 delta = event->array[0];
3189 delta <<= TS_SHIFT;
3190 delta += event->time_delta;
3191 cpu_buffer->read_stamp += delta;
3192 return;
3193
3194 case RINGBUF_TYPE_TIME_STAMP:
3195 /* FIXME: not implemented */
3196 return;
3197
3198 case RINGBUF_TYPE_DATA:
3199 cpu_buffer->read_stamp += event->time_delta;
3200 return;
3201
3202 default:
3203 BUG();
3204 }
3205 return;
3206 }
3207
3208 static void
3209 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3210 struct ring_buffer_event *event)
3211 {
3212 u64 delta;
3213
3214 switch (event->type_len) {
3215 case RINGBUF_TYPE_PADDING:
3216 return;
3217
3218 case RINGBUF_TYPE_TIME_EXTEND:
3219 delta = event->array[0];
3220 delta <<= TS_SHIFT;
3221 delta += event->time_delta;
3222 iter->read_stamp += delta;
3223 return;
3224
3225 case RINGBUF_TYPE_TIME_STAMP:
3226 /* FIXME: not implemented */
3227 return;
3228
3229 case RINGBUF_TYPE_DATA:
3230 iter->read_stamp += event->time_delta;
3231 return;
3232
3233 default:
3234 BUG();
3235 }
3236 return;
3237 }
3238
3239 static struct buffer_page *
3240 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3241 {
3242 struct buffer_page *reader = NULL;
3243 unsigned long overwrite;
3244 unsigned long flags;
3245 int nr_loops = 0;
3246 int ret;
3247
3248 local_irq_save(flags);
3249 arch_spin_lock(&cpu_buffer->lock);
3250
3251 again:
3252 /*
3253 * This should normally only loop twice. But because the
3254 * start of the reader inserts an empty page, it causes
3255 * a case where we will loop three times. There should be no
3256 * reason to loop four times (that I know of).
3257 */
3258 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3259 reader = NULL;
3260 goto out;
3261 }
3262
3263 reader = cpu_buffer->reader_page;
3264
3265 /* If there's more to read, return this page */
3266 if (cpu_buffer->reader_page->read < rb_page_size(reader))
3267 goto out;
3268
3269 /* Never should we have an index greater than the size */
3270 if (RB_WARN_ON(cpu_buffer,
3271 cpu_buffer->reader_page->read > rb_page_size(reader)))
3272 goto out;
3273
3274 /* check if we caught up to the tail */
3275 reader = NULL;
3276 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3277 goto out;
3278
3279 /* Don't bother swapping if the ring buffer is empty */
3280 if (rb_num_of_entries(cpu_buffer) == 0)
3281 goto out;
3282
3283 /*
3284 * Reset the reader page to size zero.
3285 */
3286 local_set(&cpu_buffer->reader_page->write, 0);
3287 local_set(&cpu_buffer->reader_page->entries, 0);
3288 local_set(&cpu_buffer->reader_page->page->commit, 0);
3289 cpu_buffer->reader_page->real_end = 0;
3290
3291 spin:
3292 /*
3293 * Splice the empty reader page into the list around the head.
3294 */
3295 reader = rb_set_head_page(cpu_buffer);
3296 if (!reader)
3297 goto out;
3298 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3299 cpu_buffer->reader_page->list.prev = reader->list.prev;
3300
3301 /*
3302 * cpu_buffer->pages just needs to point to the buffer, it
3303 * has no specific buffer page to point to. Let's move it out
3304 * of our way so we don't accidentally swap it.
3305 */
3306 cpu_buffer->pages = reader->list.prev;
3307
3308 /* The reader page will be pointing to the new head */
3309 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3310
3311 /*
3312 * We want to make sure we read the overruns after we set up our
3313 * pointers to the next object. The writer side does a
3314 * cmpxchg to cross pages which acts as the mb on the writer
3315 * side. Note, the reader will constantly fail the swap
3316 * while the writer is updating the pointers, so this
3317 * guarantees that the overwrite recorded here is the one we
3318 * want to compare with the last_overrun.
3319 */
3320 smp_mb();
3321 overwrite = local_read(&(cpu_buffer->overrun));
3322
3323 /*
3324 * Here's the tricky part.
3325 *
3326 * We need to move the pointer past the header page.
3327 * But we can only do that if a writer is not currently
3328 * moving it. The page before the header page has the
3329 * flag bit '1' set if it is pointing to the page we want.
3330 * But if the writer is in the process of moving it,
3331 * then it will be '2', or '0' if it has already moved.
3332 */
3333
3334 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3335
3336 /*
3337 * If we did not convert it, then we must try again.
3338 */
3339 if (!ret)
3340 goto spin;
3341
3342 /*
3343 * Yeah! We succeeded in replacing the page.
3344 *
3345 * Now make the new head point back to the reader page.
3346 */
3347 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3348 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3349
3350 /* Finally update the reader page to the new head */
3351 cpu_buffer->reader_page = reader;
3352 rb_reset_reader_page(cpu_buffer);
3353
3354 if (overwrite != cpu_buffer->last_overrun) {
3355 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3356 cpu_buffer->last_overrun = overwrite;
3357 }
3358
3359 goto again;
3360
3361 out:
3362 arch_spin_unlock(&cpu_buffer->lock);
3363 local_irq_restore(flags);
3364
3365 return reader;
3366 }
3367
3368 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3369 {
3370 struct ring_buffer_event *event;
3371 struct buffer_page *reader;
3372 unsigned length;
3373
3374 reader = rb_get_reader_page(cpu_buffer);
3375
3376 /* This function should not be called when buffer is empty */
3377 if (RB_WARN_ON(cpu_buffer, !reader))
3378 return;
3379
3380 event = rb_reader_event(cpu_buffer);
3381
3382 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3383 cpu_buffer->read++;
3384
3385 rb_update_read_stamp(cpu_buffer, event);
3386
3387 length = rb_event_length(event);
3388 cpu_buffer->reader_page->read += length;
3389 }
3390
3391 static void rb_advance_iter(struct ring_buffer_iter *iter)
3392 {
3393 struct ring_buffer_per_cpu *cpu_buffer;
3394 struct ring_buffer_event *event;
3395 unsigned length;
3396
3397 cpu_buffer = iter->cpu_buffer;
3398
3399 /*
3400 * Check if we are at the end of the buffer.
3401 */
3402 if (iter->head >= rb_page_size(iter->head_page)) {
3403 /* discarded commits can make the page empty */
3404 if (iter->head_page == cpu_buffer->commit_page)
3405 return;
3406 rb_inc_iter(iter);
3407 return;
3408 }
3409
3410 event = rb_iter_head_event(iter);
3411
3412 length = rb_event_length(event);
3413
3414 /*
3415 * This should not be called to advance the header if we are
3416 * at the tail of the buffer.
3417 */
3418 if (RB_WARN_ON(cpu_buffer,
3419 (iter->head_page == cpu_buffer->commit_page) &&
3420 (iter->head + length > rb_commit_index(cpu_buffer))))
3421 return;
3422
3423 rb_update_iter_read_stamp(iter, event);
3424
3425 iter->head += length;
3426
3427 /* check for end of page padding */
3428 if ((iter->head >= rb_page_size(iter->head_page)) &&
3429 (iter->head_page != cpu_buffer->commit_page))
3430 rb_advance_iter(iter);
3431 }
3432
3433 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3434 {
3435 return cpu_buffer->lost_events;
3436 }
3437
3438 static struct ring_buffer_event *
3439 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3440 unsigned long *lost_events)
3441 {
3442 struct ring_buffer_event *event;
3443 struct buffer_page *reader;
3444 int nr_loops = 0;
3445
3446 again:
3447 /*
3448 * We repeat when a time extend is encountered.
3449 * Since the time extend is always attached to a data event,
3450 * we should never loop more than once.
3451 * (We never hit the following condition more than twice).
3452 */
3453 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3454 return NULL;
3455
3456 reader = rb_get_reader_page(cpu_buffer);
3457 if (!reader)
3458 return NULL;
3459
3460 event = rb_reader_event(cpu_buffer);
3461
3462 switch (event->type_len) {
3463 case RINGBUF_TYPE_PADDING:
3464 if (rb_null_event(event))
3465 RB_WARN_ON(cpu_buffer, 1);
3466 /*
3467 * Because the writer could be discarding every
3468 * event it creates (which would probably be bad)
3469 * if we were to go back to "again" then we may never
3470 * catch up, and will trigger the warn on, or lock
3471 * the box. Return the padding, and we will release
3472 * the current locks, and try again.
3473 */
3474 return event;
3475
3476 case RINGBUF_TYPE_TIME_EXTEND:
3477 /* Internal data, OK to advance */
3478 rb_advance_reader(cpu_buffer);
3479 goto again;
3480
3481 case RINGBUF_TYPE_TIME_STAMP:
3482 /* FIXME: not implemented */
3483 rb_advance_reader(cpu_buffer);
3484 goto again;
3485
3486 case RINGBUF_TYPE_DATA:
3487 if (ts) {
3488 *ts = cpu_buffer->read_stamp + event->time_delta;
3489 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3490 cpu_buffer->cpu, ts);
3491 }
3492 if (lost_events)
3493 *lost_events = rb_lost_events(cpu_buffer);
3494 return event;
3495
3496 default:
3497 BUG();
3498 }
3499
3500 return NULL;
3501 }
3502 EXPORT_SYMBOL_GPL(ring_buffer_peek);
3503
3504 static struct ring_buffer_event *
3505 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3506 {
3507 struct ring_buffer *buffer;
3508 struct ring_buffer_per_cpu *cpu_buffer;
3509 struct ring_buffer_event *event;
3510 int nr_loops = 0;
3511
3512 cpu_buffer = iter->cpu_buffer;
3513 buffer = cpu_buffer->buffer;
3514
3515 /*
3516 * Check if someone performed a consuming read to
3517 * the buffer. A consuming read invalidates the iterator
3518 * and we need to reset the iterator in this case.
3519 */
3520 if (unlikely(iter->cache_read != cpu_buffer->read ||
3521 iter->cache_reader_page != cpu_buffer->reader_page))
3522 rb_iter_reset(iter);
3523
3524 again:
3525 if (ring_buffer_iter_empty(iter))
3526 return NULL;
3527
3528 /*
3529 * We repeat when a time extend is encountered.
3530 * Since the time extend is always attached to a data event,
3531 * we should never loop more than once.
3532 * (We never hit the following condition more than twice).
3533 */
3534 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3535 return NULL;
3536
3537 if (rb_per_cpu_empty(cpu_buffer))
3538 return NULL;
3539
3540 if (iter->head >= local_read(&iter->head_page->page->commit)) {
3541 rb_inc_iter(iter);
3542 goto again;
3543 }
3544
3545 event = rb_iter_head_event(iter);
3546
3547 switch (event->type_len) {
3548 case RINGBUF_TYPE_PADDING:
3549 if (rb_null_event(event)) {
3550 rb_inc_iter(iter);
3551 goto again;
3552 }
3553 rb_advance_iter(iter);
3554 return event;
3555
3556 case RINGBUF_TYPE_TIME_EXTEND:
3557 /* Internal data, OK to advance */
3558 rb_advance_iter(iter);
3559 goto again;
3560
3561 case RINGBUF_TYPE_TIME_STAMP:
3562 /* FIXME: not implemented */
3563 rb_advance_iter(iter);
3564 goto again;
3565
3566 case RINGBUF_TYPE_DATA:
3567 if (ts) {
3568 *ts = iter->read_stamp + event->time_delta;
3569 ring_buffer_normalize_time_stamp(buffer,
3570 cpu_buffer->cpu, ts);
3571 }
3572 return event;
3573
3574 default:
3575 BUG();
3576 }
3577
3578 return NULL;
3579 }
3580 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3581
3582 static inline int rb_ok_to_lock(void)
3583 {
3584 /*
3585 * If an NMI die dumps out the content of the ring buffer,
3586 * do not grab locks. We also permanently disable the ring
3587 * buffer. A one time deal is all you get from reading
3588 * the ring buffer from an NMI.
3589 */
3590 if (likely(!in_nmi()))
3591 return 1;
3592
3593 tracing_off_permanent();
3594 return 0;
3595 }
3596
3597 /**
3598 * ring_buffer_peek - peek at the next event to be read
3599 * @buffer: The ring buffer to read
3600 * @cpu: The cpu to peek at
3601 * @ts: The timestamp counter of this event.
3602 * @lost_events: a variable to store if events were lost (may be NULL)
3603 *
3604 * This will return the event that will be read next, but does
3605 * not consume the data.
3606 */
3607 struct ring_buffer_event *
3608 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3609 unsigned long *lost_events)
3610 {
3611 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3612 struct ring_buffer_event *event;
3613 unsigned long flags;
3614 int dolock;
3615
3616 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3617 return NULL;
3618
3619 dolock = rb_ok_to_lock();
3620 again:
3621 local_irq_save(flags);
3622 if (dolock)
3623 raw_spin_lock(&cpu_buffer->reader_lock);
3624 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3625 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3626 rb_advance_reader(cpu_buffer);
3627 if (dolock)
3628 raw_spin_unlock(&cpu_buffer->reader_lock);
3629 local_irq_restore(flags);
3630
3631 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3632 goto again;
3633
3634 return event;
3635 }
3636
3637 /**
3638 * ring_buffer_iter_peek - peek at the next event to be read
3639 * @iter: The ring buffer iterator
3640 * @ts: The timestamp counter of this event.
3641 *
3642 * This will return the event that will be read next, but does
3643 * not increment the iterator.
3644 */
3645 struct ring_buffer_event *
3646 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3647 {
3648 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3649 struct ring_buffer_event *event;
3650 unsigned long flags;
3651
3652 again:
3653 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3654 event = rb_iter_peek(iter, ts);
3655 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3656
3657 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3658 goto again;
3659
3660 return event;
3661 }
3662
3663 /**
3664 * ring_buffer_consume - return an event and consume it
3665 * @buffer: The ring buffer to get the next event from
3666 * @cpu: the cpu to read the buffer from
3667 * @ts: a variable to store the timestamp (may be NULL)
3668 * @lost_events: a variable to store if events were lost (may be NULL)
3669 *
3670 * Returns the next event in the ring buffer, and that event is consumed.
3671 * Meaning that sequential reads will keep returning a different event,
3672 * and eventually empty the ring buffer if the producer is slower.
3673 */
3674 struct ring_buffer_event *
3675 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3676 unsigned long *lost_events)
3677 {
3678 struct ring_buffer_per_cpu *cpu_buffer;
3679 struct ring_buffer_event *event = NULL;
3680 unsigned long flags;
3681 int dolock;
3682
3683 dolock = rb_ok_to_lock();
3684
3685 again:
3686 /* might be called in atomic */
3687 preempt_disable();
3688
3689 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3690 goto out;
3691
3692 cpu_buffer = buffer->buffers[cpu];
3693 local_irq_save(flags);
3694 if (dolock)
3695 raw_spin_lock(&cpu_buffer->reader_lock);
3696
3697 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3698 if (event) {
3699 cpu_buffer->lost_events = 0;
3700 rb_advance_reader(cpu_buffer);
3701 }
3702
3703 if (dolock)
3704 raw_spin_unlock(&cpu_buffer->reader_lock);
3705 local_irq_restore(flags);
3706
3707 out:
3708 preempt_enable();
3709
3710 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3711 goto again;
3712
3713 return event;
3714 }
3715 EXPORT_SYMBOL_GPL(ring_buffer_consume);
3716
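/*
 * Editorial sketch, not part of the original source: draining one CPU's
 * buffer with the consuming read above. "process" is a hypothetical
 * callback; only the ring buffer calls are real API.
 */
static __maybe_unused void rb_example_drain_cpu(struct ring_buffer *buffer, int cpu,
						void (*process)(void *data,
								unsigned length))
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)) != NULL)
		process(ring_buffer_event_data(event),
			ring_buffer_event_length(event));
}
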
3717 /**
3718 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
3719 * @buffer: The ring buffer to read from
3720 * @cpu: The cpu buffer to iterate over
3721 *
3722 * This performs the initial preparations necessary to iterate
3723 * through the buffer. Memory is allocated, buffer recording
3724 * is disabled, and the iterator pointer is returned to the caller.
3725 *
3726 * Disabling buffer recording prevents the reading from being
3727 * corrupted. This is not a consuming read, so a producer is not
3728 * expected.
3729 *
3730 * After a sequence of ring_buffer_read_prepare calls, the user is
3731 * expected to make at least one call to ring_buffer_read_prepare_sync.
3732 * Afterwards, ring_buffer_read_start is invoked to get things going
3733 * for real.
3734 *
3735 * This overall must be paired with ring_buffer_read_finish.
3736 */
3737 struct ring_buffer_iter *
3738 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
3739 {
3740 struct ring_buffer_per_cpu *cpu_buffer;
3741 struct ring_buffer_iter *iter;
3742
3743 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3744 return NULL;
3745
3746 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3747 if (!iter)
3748 return NULL;
3749
3750 cpu_buffer = buffer->buffers[cpu];
3751
3752 iter->cpu_buffer = cpu_buffer;
3753
3754 atomic_inc(&buffer->resize_disabled);
3755 atomic_inc(&cpu_buffer->record_disabled);
3756
3757 return iter;
3758 }
3759 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
3760
3761 /**
3762 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
3763 *
3764 * All previously invoked ring_buffer_read_prepare calls to prepare
3765 * iterators will be synchronized. Afterwards, ring_buffer_read_start
3766 * calls on those iterators are allowed.
3767 */
3768 void
3769 ring_buffer_read_prepare_sync(void)
3770 {
3771 synchronize_sched();
3772 }
3773 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
3774
3775 /**
3776 * ring_buffer_read_start - start a non consuming read of the buffer
3777 * @iter: The iterator returned by ring_buffer_read_prepare
3778 *
3779 * This finalizes the startup of an iteration through the buffer.
3780 * The iterator comes from a call to ring_buffer_read_prepare and
3781 * an intervening ring_buffer_read_prepare_sync must have been
3782 * performed.
3783 *
3784 * Must be paired with ring_buffer_read_finish.
3785 */
3786 void
3787 ring_buffer_read_start(struct ring_buffer_iter *iter)
3788 {
3789 struct ring_buffer_per_cpu *cpu_buffer;
3790 unsigned long flags;
3791
3792 if (!iter)
3793 return;
3794
3795 cpu_buffer = iter->cpu_buffer;
3796
3797 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3798 arch_spin_lock(&cpu_buffer->lock);
3799 rb_iter_reset(iter);
3800 arch_spin_unlock(&cpu_buffer->lock);
3801 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3802 }
3803 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
3804
3805 /**
3806 * ring_buffer_read_finish - finish reading the iterator of the buffer
3807 * @iter: The iterator retrieved by ring_buffer_read_prepare
3808 *
3809 * This re-enables the recording to the buffer, and frees the
3810 * iterator.
3811 */
3812 void
3813 ring_buffer_read_finish(struct ring_buffer_iter *iter)
3814 {
3815 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3816 unsigned long flags;
3817
3818 /*
3819 * Ring buffer is disabled from recording, here's a good place
3820 * to check the integrity of the ring buffer.
3821 * Must prevent readers from trying to read, as the check
3822 * clears the HEAD page and readers require it.
3823 */
3824 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3825 rb_check_pages(cpu_buffer);
3826 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3827
3828 atomic_dec(&cpu_buffer->record_disabled);
3829 atomic_dec(&cpu_buffer->buffer->resize_disabled);
3830 kfree(iter);
3831 }
3832 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
3833
3834 /**
3835 * ring_buffer_read - read the next item in the ring buffer by the iterator
3836 * @iter: The ring buffer iterator
3837 * @ts: The time stamp of the event read.
3838 *
3839 * This reads the next event in the ring buffer and increments the iterator.
3840 */
3841 struct ring_buffer_event *
3842 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
3843 {
3844 struct ring_buffer_event *event;
3845 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3846 unsigned long flags;
3847
3848 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3849 again:
3850 event = rb_iter_peek(iter, ts);
3851 if (!event)
3852 goto out;
3853
3854 if (event->type_len == RINGBUF_TYPE_PADDING)
3855 goto again;
3856
3857 rb_advance_iter(iter);
3858 out:
3859 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3860
3861 return event;
3862 }
3863 EXPORT_SYMBOL_GPL(ring_buffer_read);
3864
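/*
 * Editorial sketch, not part of the original source: the full non-consuming
 * read sequence spelled out in the kernel-doc above, for one CPU. "process"
 * is a hypothetical callback; only the ring buffer calls are real API.
 */
static __maybe_unused void rb_example_iterate_cpu(struct ring_buffer *buffer, int cpu,
						  void (*process)(void *data))
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu);
	if (!iter)
		return;

	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_read(iter, &ts)) != NULL)
		process(ring_buffer_event_data(event));

	ring_buffer_read_finish(iter);
}
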
3865 /**
3866 * ring_buffer_size - return the size of the ring buffer (in bytes)
3867 * @buffer: The ring buffer.
3868 */
3869 unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
3870 {
3871 /*
3872 * Earlier, this method returned
3873 * BUF_PAGE_SIZE * buffer->nr_pages
3874 * Since the nr_pages field is now removed, we have converted this to
3875 * return the per cpu buffer value.
3876 */
3877 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3878 return 0;
3879
3880 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
3881 }
3882 EXPORT_SYMBOL_GPL(ring_buffer_size);
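/*
 * Sketch (illustrative only): total size of the buffer across all of
 * its CPUs, built on ring_buffer_size() above.  The helper name is an
 * assumption for the example; for_each_buffer_cpu() is the iterator
 * already used elsewhere in this file.
 *
 *	static unsigned long example_total_size(struct ring_buffer *buffer)
 *	{
 *		unsigned long size = 0;
 *		int cpu;
 *
 *		for_each_buffer_cpu(buffer, cpu)
 *			size += ring_buffer_size(buffer, cpu);
 *		return size;
 *	}
 */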
3883
3884 static void
3885 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
3886 {
3887 rb_head_page_deactivate(cpu_buffer);
3888
3889 cpu_buffer->head_page
3890 = list_entry(cpu_buffer->pages, struct buffer_page, list);
3891 local_set(&cpu_buffer->head_page->write, 0);
3892 local_set(&cpu_buffer->head_page->entries, 0);
3893 local_set(&cpu_buffer->head_page->page->commit, 0);
3894
3895 cpu_buffer->head_page->read = 0;
3896
3897 cpu_buffer->tail_page = cpu_buffer->head_page;
3898 cpu_buffer->commit_page = cpu_buffer->head_page;
3899
3900 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
3901 INIT_LIST_HEAD(&cpu_buffer->new_pages);
3902 local_set(&cpu_buffer->reader_page->write, 0);
3903 local_set(&cpu_buffer->reader_page->entries, 0);
3904 local_set(&cpu_buffer->reader_page->page->commit, 0);
3905 cpu_buffer->reader_page->read = 0;
3906
3907 local_set(&cpu_buffer->entries_bytes, 0);
3908 local_set(&cpu_buffer->overrun, 0);
3909 local_set(&cpu_buffer->commit_overrun, 0);
3910 local_set(&cpu_buffer->dropped_events, 0);
3911 local_set(&cpu_buffer->entries, 0);
3912 local_set(&cpu_buffer->committing, 0);
3913 local_set(&cpu_buffer->commits, 0);
3914 cpu_buffer->read = 0;
3915 cpu_buffer->read_bytes = 0;
3916
3917 cpu_buffer->write_stamp = 0;
3918 cpu_buffer->read_stamp = 0;
3919
3920 cpu_buffer->lost_events = 0;
3921 cpu_buffer->last_overrun = 0;
3922
3923 rb_head_page_activate(cpu_buffer);
3924 }
3925
3926 /**
3927 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
3928 * @buffer: The ring buffer to reset a per cpu buffer of
3929 * @cpu: The CPU buffer to be reset
3930 */
3931 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
3932 {
3933 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3934 unsigned long flags;
3935
3936 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3937 return;
3938
3939 atomic_inc(&buffer->resize_disabled);
3940 atomic_inc(&cpu_buffer->record_disabled);
3941
3942 /* Make sure all commits have finished */
3943 synchronize_sched();
3944
3945 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3946
3947 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
3948 goto out;
3949
3950 arch_spin_lock(&cpu_buffer->lock);
3951
3952 rb_reset_cpu(cpu_buffer);
3953
3954 arch_spin_unlock(&cpu_buffer->lock);
3955
3956 out:
3957 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3958
3959 atomic_dec(&cpu_buffer->record_disabled);
3960 atomic_dec(&buffer->resize_disabled);
3961 }
3962 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
3963
3964 /**
3965 * ring_buffer_reset - reset a ring buffer
3966 * @buffer: The ring buffer to reset all cpu buffers
3967 */
3968 void ring_buffer_reset(struct ring_buffer *buffer)
3969 {
3970 int cpu;
3971
3972 for_each_buffer_cpu(buffer, cpu)
3973 ring_buffer_reset_cpu(buffer, cpu);
3974 }
3975 EXPORT_SYMBOL_GPL(ring_buffer_reset);
3976
3977 /**
3978 * ring_buffer_empty - is the ring buffer empty?
3979 * @buffer: The ring buffer to test
3980 */
3981 int ring_buffer_empty(struct ring_buffer *buffer)
3982 {
3983 struct ring_buffer_per_cpu *cpu_buffer;
3984 unsigned long flags;
3985 int dolock;
3986 int cpu;
3987 int ret;
3988
3989 dolock = rb_ok_to_lock();
3990
3991 /* yes this is racy, but if you don't like the race, lock the buffer */
3992 for_each_buffer_cpu(buffer, cpu) {
3993 cpu_buffer = buffer->buffers[cpu];
3994 local_irq_save(flags);
3995 if (dolock)
3996 raw_spin_lock(&cpu_buffer->reader_lock);
3997 ret = rb_per_cpu_empty(cpu_buffer);
3998 if (dolock)
3999 raw_spin_unlock(&cpu_buffer->reader_lock);
4000 local_irq_restore(flags);
4001
4002 if (!ret)
4003 return 0;
4004 }
4005
4006 return 1;
4007 }
4008 EXPORT_SYMBOL_GPL(ring_buffer_empty);
4009
4010 /**
4011 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4012 * @buffer: The ring buffer
4013 * @cpu: The CPU buffer to test
4014 */
4015 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4016 {
4017 struct ring_buffer_per_cpu *cpu_buffer;
4018 unsigned long flags;
4019 int dolock;
4020 int ret;
4021
4022 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4023 return 1;
4024
4025 dolock = rb_ok_to_lock();
4026
4027 cpu_buffer = buffer->buffers[cpu];
4028 local_irq_save(flags);
4029 if (dolock)
4030 raw_spin_lock(&cpu_buffer->reader_lock);
4031 ret = rb_per_cpu_empty(cpu_buffer);
4032 if (dolock)
4033 raw_spin_unlock(&cpu_buffer->reader_lock);
4034 local_irq_restore(flags);
4035
4036 return ret;
4037 }
4038 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
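/*
 * Sketch (illustrative only): skip per cpu buffers that have nothing to
 * read before doing more expensive work.  example_consume_cpu() is a
 * hypothetical callback; only the ring_buffer_empty_cpu() check comes
 * from this file, and the check is racy in the same way noted above.
 *
 *	static void example_consume_nonempty(struct ring_buffer *buffer)
 *	{
 *		int cpu;
 *
 *		for_each_buffer_cpu(buffer, cpu) {
 *			if (ring_buffer_empty_cpu(buffer, cpu))
 *				continue;
 *			example_consume_cpu(buffer, cpu);
 *		}
 *	}
 */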
4039
4040 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4041 /**
4042 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
4043 * @buffer_a: One buffer to swap with
4044 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the per cpu buffers to swap
4045 *
4046 * This function is useful for tracers that want to take a "snapshot"
4047 * of a CPU buffer and have another back up buffer lying around.
4048 * It is expected that the tracer handles the cpu buffer not being
4049 * used at the moment.
4050 */
4051 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4052 struct ring_buffer *buffer_b, int cpu)
4053 {
4054 struct ring_buffer_per_cpu *cpu_buffer_a;
4055 struct ring_buffer_per_cpu *cpu_buffer_b;
4056 int ret = -EINVAL;
4057
4058 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4059 !cpumask_test_cpu(cpu, buffer_b->cpumask))
4060 goto out;
4061
4062 cpu_buffer_a = buffer_a->buffers[cpu];
4063 cpu_buffer_b = buffer_b->buffers[cpu];
4064
4065 /* At least make sure the two buffers are somewhat the same */
4066 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4067 goto out;
4068
4069 ret = -EAGAIN;
4070
4071 if (ring_buffer_flags != RB_BUFFERS_ON)
4072 goto out;
4073
4074 if (atomic_read(&buffer_a->record_disabled))
4075 goto out;
4076
4077 if (atomic_read(&buffer_b->record_disabled))
4078 goto out;
4079
4080 if (atomic_read(&cpu_buffer_a->record_disabled))
4081 goto out;
4082
4083 if (atomic_read(&cpu_buffer_b->record_disabled))
4084 goto out;
4085
4086 /*
4087 * We can't do a synchronize_sched here because this
4088 * function can be called in atomic context.
4089 * Normally this will be called from the same CPU as cpu.
4090 * If not it's up to the caller to protect this.
4091 */
4092 atomic_inc(&cpu_buffer_a->record_disabled);
4093 atomic_inc(&cpu_buffer_b->record_disabled);
4094
4095 ret = -EBUSY;
4096 if (local_read(&cpu_buffer_a->committing))
4097 goto out_dec;
4098 if (local_read(&cpu_buffer_b->committing))
4099 goto out_dec;
4100
4101 buffer_a->buffers[cpu] = cpu_buffer_b;
4102 buffer_b->buffers[cpu] = cpu_buffer_a;
4103
4104 cpu_buffer_b->buffer = buffer_a;
4105 cpu_buffer_a->buffer = buffer_b;
4106
4107 ret = 0;
4108
4109 out_dec:
4110 atomic_dec(&cpu_buffer_a->record_disabled);
4111 atomic_dec(&cpu_buffer_b->record_disabled);
4112 out:
4113 return ret;
4114 }
4115 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
4116 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
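/*
 * Sketch (illustrative only): a per cpu "snapshot" in the spirit of the
 * kerneldoc above.  The live/spare naming and the caller are assumptions
 * for the example; ring_buffer_swap_cpu() may fail with -EINVAL, -EAGAIN
 * or -EBUSY and the caller must be prepared for that.
 *
 *	static int example_snapshot_cpu(struct ring_buffer *live,
 *					struct ring_buffer *spare, int cpu)
 *	{
 *		int ret;
 *
 *		ret = ring_buffer_swap_cpu(live, spare, cpu);
 *		if (ret)
 *			return ret;
 *
 *		return 0;
 *	}
 *
 * On success "spare" holds the captured events for that cpu and "live"
 * keeps recording into the formerly idle pages.
 */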
4117
4118 /**
4119 * ring_buffer_alloc_read_page - allocate a page to read from buffer
4120 * @buffer: the buffer to allocate for.
 * @cpu: the cpu buffer to allocate a page from.
4121 *
4122 * This function is used in conjunction with ring_buffer_read_page.
4123 * When reading a full page from the ring buffer, these functions
4124 * can be used to speed up the process. The calling function should
4125 * allocate a few pages first with this function. Then when it
4126 * needs to get pages from the ring buffer, it passes the result
4127 * of this function into ring_buffer_read_page, which will swap
4128 * the allocated page with the read page of the buffer.
4129 *
4130 * Returns:
4131 * The page allocated, or NULL on error.
4132 */
4133 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4134 {
4135 struct buffer_data_page *bpage;
4136 struct page *page;
4137
4138 page = alloc_pages_node(cpu_to_node(cpu),
4139 GFP_KERNEL | __GFP_NORETRY, 0);
4140 if (!page)
4141 return NULL;
4142
4143 bpage = page_address(page);
4144
4145 rb_init_page(bpage);
4146
4147 return bpage;
4148 }
4149 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
4150
4151 /**
4152 * ring_buffer_free_read_page - free an allocated read page
4153 * @buffer: the buffer the page was allocated for
4154 * @data: the page to free
4155 *
4156 * Free a page allocated from ring_buffer_alloc_read_page.
4157 */
4158 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
4159 {
4160 free_page((unsigned long)data);
4161 }
4162 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4163
4164 /**
4165 * ring_buffer_read_page - extract a page from the ring buffer
4166 * @buffer: buffer to extract from
4167 * @data_page: the page to use, allocated from ring_buffer_alloc_read_page
4168 * @len: amount to extract
4169 * @cpu: the cpu of the buffer to extract
4170 * @full: should the extraction only happen when the page is full.
4171 *
4172 * This function will pull out a page from the ring buffer and consume it.
4173 * @data_page must be the address of the variable that was returned
4174 * from ring_buffer_alloc_read_page. This is because the page might be used
4175 * to swap with a page in the ring buffer.
4176 *
4177 * for example:
4178 * rpage = ring_buffer_alloc_read_page(buffer, cpu);
4179 * if (!rpage)
4180 * return error;
4181 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4182 * if (ret >= 0)
4183 * process_page(rpage, ret);
4184 *
4185 * When @full is set, the function will not succeed unless
4186 * the writer is off the reader page.
4187 *
4188 * Note: it is up to the calling functions to handle sleeps and wakeups.
4189 * The ring buffer can be used anywhere in the kernel and can not
4190 * blindly call wake_up. The layer that uses the ring buffer must be
4191 * responsible for that.
4192 *
4193 * Returns:
4194 * >=0 if data has been transferred, returns the offset of consumed data.
4195 * <0 if no data has been transferred.
4196 */
4197 int ring_buffer_read_page(struct ring_buffer *buffer,
4198 void **data_page, size_t len, int cpu, int full)
4199 {
4200 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4201 struct ring_buffer_event *event;
4202 struct buffer_data_page *bpage;
4203 struct buffer_page *reader;
4204 unsigned long missed_events;
4205 unsigned long flags;
4206 unsigned int commit;
4207 unsigned int read;
4208 u64 save_timestamp;
4209 int ret = -1;
4210
4211 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4212 goto out;
4213
4214 /*
4215 * If len is not big enough to hold the page header, then
4216 * we can not copy anything.
4217 */
4218 if (len <= BUF_PAGE_HDR_SIZE)
4219 goto out;
4220
4221 len -= BUF_PAGE_HDR_SIZE;
4222
4223 if (!data_page)
4224 goto out;
4225
4226 bpage = *data_page;
4227 if (!bpage)
4228 goto out;
4229
4230 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4231
4232 reader = rb_get_reader_page(cpu_buffer);
4233 if (!reader)
4234 goto out_unlock;
4235
4236 event = rb_reader_event(cpu_buffer);
4237
4238 read = reader->read;
4239 commit = rb_page_commit(reader);
4240
4241 /* Check if any events were dropped */
4242 missed_events = cpu_buffer->lost_events;
4243
4244 /*
4245 * If this page has been partially read or
4246 * if len is not big enough to read the rest of the page or
4247 * a writer is still on the page, then
4248 * we must copy the data from the page to the buffer.
4249 * Otherwise, we can simply swap the page with the one passed in.
4250 */
4251 if (read || (len < (commit - read)) ||
4252 cpu_buffer->reader_page == cpu_buffer->commit_page) {
4253 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4254 unsigned int rpos = read;
4255 unsigned int pos = 0;
4256 unsigned int size;
4257
4258 if (full)
4259 goto out_unlock;
4260
4261 if (len > (commit - read))
4262 len = (commit - read);
4263
4264 /* Always keep the time extend and data together */
4265 size = rb_event_ts_length(event);
4266
4267 if (len < size)
4268 goto out_unlock;
4269
4270 /* save the current timestamp, since the user will need it */
4271 save_timestamp = cpu_buffer->read_stamp;
4272
4273 /* Need to copy one event at a time */
4274 do {
4275 /* We need the size of one event, because
4276 * rb_advance_reader only advances by one event,
4277 * whereas rb_event_ts_length may include the size of
4278 * one or two events.
4279 * We have already ensured there's enough space if this
4280 * is a time extend. */
4281 size = rb_event_length(event);
4282 memcpy(bpage->data + pos, rpage->data + rpos, size);
4283
4284 len -= size;
4285
4286 rb_advance_reader(cpu_buffer);
4287 rpos = reader->read;
4288 pos += size;
4289
4290 if (rpos >= commit)
4291 break;
4292
4293 event = rb_reader_event(cpu_buffer);
4294 /* Always keep the time extend and data together */
4295 size = rb_event_ts_length(event);
4296 } while (len >= size);
4297
4298 /* update bpage */
4299 local_set(&bpage->commit, pos);
4300 bpage->time_stamp = save_timestamp;
4301
4302 /* we copied everything to the beginning */
4303 read = 0;
4304 } else {
4305 /* update the entry counter */
4306 cpu_buffer->read += rb_page_entries(reader);
4307 cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4308
4309 /* swap the pages */
4310 rb_init_page(bpage);
4311 bpage = reader->page;
4312 reader->page = *data_page;
4313 local_set(&reader->write, 0);
4314 local_set(&reader->entries, 0);
4315 reader->read = 0;
4316 *data_page = bpage;
4317
4318 /*
4319 * Use the real_end for the data size,
4320 * This gives us a chance to store the lost events
4321 * on the page.
4322 */
4323 if (reader->real_end)
4324 local_set(&bpage->commit, reader->real_end);
4325 }
4326 ret = read;
4327
4328 cpu_buffer->lost_events = 0;
4329
4330 commit = local_read(&bpage->commit);
4331 /*
4332 * Set a flag in the commit field if we lost events
4333 */
4334 if (missed_events) {
4335 /* If there is room at the end of the page to save the
4336 * missed events, then record it there.
4337 */
4338 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4339 memcpy(&bpage->data[commit], &missed_events,
4340 sizeof(missed_events));
4341 local_add(RB_MISSED_STORED, &bpage->commit);
4342 commit += sizeof(missed_events);
4343 }
4344 local_add(RB_MISSED_EVENTS, &bpage->commit);
4345 }
4346
4347 /*
4348 * This page may be off to user land. Zero it out here.
4349 */
4350 if (commit < BUF_PAGE_SIZE)
4351 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
4352
4353 out_unlock:
4354 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4355
4356 out:
4357 return ret;
4358 }
4359 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
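/*
 * Fuller sketch (illustrative only) of the alloc/read/free cycle from
 * the kerneldoc above: drain one cpu buffer a page at a time.  The
 * example_process_page() callback is hypothetical; BUF_PAGE_SIZE as the
 * read length and @full == 0 are just one reasonable choice.
 *
 *	static int example_drain_cpu(struct ring_buffer *buffer, int cpu)
 *	{
 *		void *rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *		int ret;
 *
 *		if (!rpage)
 *			return -ENOMEM;
 *		do {
 *			ret = ring_buffer_read_page(buffer, &rpage,
 *						    BUF_PAGE_SIZE, cpu, 0);
 *			if (ret >= 0)
 *				example_process_page(rpage, ret);
 *		} while (ret >= 0);
 *		ring_buffer_free_read_page(buffer, rpage);
 *		return 0;
 *	}
 */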
4360
4361 #ifdef CONFIG_HOTPLUG_CPU
4362 static int rb_cpu_notify(struct notifier_block *self,
4363 unsigned long action, void *hcpu)
4364 {
4365 struct ring_buffer *buffer =
4366 container_of(self, struct ring_buffer, cpu_notify);
4367 long cpu = (long)hcpu;
4368 int cpu_i, nr_pages_same;
4369 unsigned int nr_pages;
4370
4371 switch (action) {
4372 case CPU_UP_PREPARE:
4373 case CPU_UP_PREPARE_FROZEN:
4374 if (cpumask_test_cpu(cpu, buffer->cpumask))
4375 return NOTIFY_OK;
4376
4377 nr_pages = 0;
4378 nr_pages_same = 1;
4379 /* check if all cpu sizes are same */
4380 for_each_buffer_cpu(buffer, cpu_i) {
4381 /* fill in the size from first enabled cpu */
4382 if (nr_pages == 0)
4383 nr_pages = buffer->buffers[cpu_i]->nr_pages;
4384 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4385 nr_pages_same = 0;
4386 break;
4387 }
4388 }
4389 /* allocate minimum pages, user can later expand it */
4390 if (!nr_pages_same)
4391 nr_pages = 2;
4392 buffer->buffers[cpu] =
4393 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4394 if (!buffer->buffers[cpu]) {
4395 WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4396 cpu);
4397 return NOTIFY_OK;
4398 }
4399 smp_wmb();
4400 cpumask_set_cpu(cpu, buffer->cpumask);
4401 break;
4402 case CPU_DOWN_PREPARE:
4403 case CPU_DOWN_PREPARE_FROZEN:
4404 /*
4405 * Do nothing.
4406 * If we were to free the buffer, then the user would
4407 * lose any trace that was in the buffer.
4408 */
4409 break;
4410 default:
4411 break;
4412 }
4413 return NOTIFY_OK;
4414 }
4415 #endif