kernel/trace/ring_buffer.c
1 /*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6 #include <linux/ftrace_event.h>
7 #include <linux/ring_buffer.h>
8 #include <linux/trace_clock.h>
9 #include <linux/trace_seq.h>
10 #include <linux/spinlock.h>
11 #include <linux/debugfs.h>
12 #include <linux/uaccess.h>
13 #include <linux/hardirq.h>
14 #include <linux/kmemcheck.h>
15 #include <linux/module.h>
16 #include <linux/percpu.h>
17 #include <linux/mutex.h>
18 #include <linux/slab.h>
19 #include <linux/init.h>
20 #include <linux/hash.h>
21 #include <linux/list.h>
22 #include <linux/cpu.h>
23 #include <linux/fs.h>
24
25 #include <asm/local.h>
26
27 static void update_pages_handler(struct work_struct *work);
28
29 /*
30  * The ring buffer header is special. We must manually keep it up to date.
31 */
32 int ring_buffer_print_entry_header(struct trace_seq *s)
33 {
34 int ret;
35
36 ret = trace_seq_printf(s, "# compressed entry header\n");
37 ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
38 ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
39 ret = trace_seq_printf(s, "\tarray : 32 bits\n");
40 ret = trace_seq_printf(s, "\n");
41 ret = trace_seq_printf(s, "\tpadding : type == %d\n",
42 RINGBUF_TYPE_PADDING);
43 ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
44 RINGBUF_TYPE_TIME_EXTEND);
45 ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
46 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
47
48 return ret;
49 }
50
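/*
 * Illustrative sketch (added for clarity, not used anywhere): a struct
 * mirroring the compressed header printed above. The real definition
 * lives in <linux/ring_buffer.h>; the name rb_sketch_event_header is
 * made up here.
 *
 * type_len 1..28 encodes a small payload size in 4-byte words,
 * type_len 0 means the payload size is carried in array[0], and the
 * values 29..31 are the padding/time_extend/time_stamp types handled
 * by rb_event_length() below.
 */
struct rb_sketch_event_header {
	u32	type_len:5, time_delta:27;	/* the 32-bit header */
	u32	array[];			/* length word and/or payload */
};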
51 /*
52 * The ring buffer is made up of a list of pages. A separate list of pages is
53 * allocated for each CPU. A writer may only write to a buffer that is
54 * associated with the CPU it is currently executing on. A reader may read
55 * from any per cpu buffer.
56 *
57 * The reader is special. For each per cpu buffer, the reader has its own
58 * reader page. When a reader has read the entire reader page, this reader
59 * page is swapped with another page in the ring buffer.
60 *
61  * Now, as long as the writer is off the reader page, the reader can do
62  * whatever it wants with that page. The writer will never write to that page
63 * again (as long as it is out of the ring buffer).
64 *
65 * Here's some silly ASCII art.
66 *
67 * +------+
68 * |reader| RING BUFFER
69 * |page |
70 * +------+ +---+ +---+ +---+
71 * | |-->| |-->| |
72 * +---+ +---+ +---+
73 * ^ |
74 * | |
75 * +---------------+
76 *
77 *
78 * +------+
79 * |reader| RING BUFFER
80 * |page |------------------v
81 * +------+ +---+ +---+ +---+
82 * | |-->| |-->| |
83 * +---+ +---+ +---+
84 * ^ |
85 * | |
86 * +---------------+
87 *
88 *
89 * +------+
90 * |reader| RING BUFFER
91 * |page |------------------v
92 * +------+ +---+ +---+ +---+
93 * ^ | |-->| |-->| |
94 * | +---+ +---+ +---+
95 * | |
96 * | |
97 * +------------------------------+
98 *
99 *
100 * +------+
101 * |buffer| RING BUFFER
102 * |page |------------------v
103 * +------+ +---+ +---+ +---+
104 * ^ | | | |-->| |
105 * | New +---+ +---+ +---+
106 * | Reader------^ |
107 * | page |
108 * +------------------------------+
109 *
110 *
111 * After we make this swap, the reader can hand this page off to the splice
112 * code and be done with it. It can even allocate a new page if it needs to
113 * and swap that into the ring buffer.
114 *
115 * We will be using cmpxchg soon to make all this lockless.
116 *
117 */
118
119 /*
120 * A fast way to enable or disable all ring buffers is to
121 * call tracing_on or tracing_off. Turning off the ring buffers
122 * prevents all ring buffers from being recorded to.
123  * Turning this switch on makes it OK to write to the
124 * ring buffer, if the ring buffer is enabled itself.
125 *
126  * There are three layers that must be on in order to write
127 * to the ring buffer.
128 *
129 * 1) This global flag must be set.
130 * 2) The ring buffer must be enabled for recording.
131 * 3) The per cpu buffer must be enabled for recording.
132 *
133 * In case of an anomaly, this global flag has a bit set that
134  * will permanently disable all ring buffers.
135 */
136
137 /*
138 * Global flag to disable all recording to ring buffers
139 * This has two bits: ON, DISABLED
140 *
141 * ON DISABLED
142 * ---- ----------
143 * 0 0 : ring buffers are off
144 * 1 0 : ring buffers are on
145 * X 1 : ring buffers are permanently disabled
146 */
147
148 enum {
149 RB_BUFFERS_ON_BIT = 0,
150 RB_BUFFERS_DISABLED_BIT = 1,
151 };
152
153 enum {
154 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
155 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
156 };
157
158 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
159
160 /* Used for individual buffers (after the counter) */
161 #define RB_BUFFER_OFF (1 << 20)
162
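/*
 * Illustrative sketch of the three-layer rule described above (this
 * function is made up for documentation and is not called anywhere;
 * the two arguments stand in for the buffer-wide and per-cpu
 * record_disabled counters kept in the structures defined later in
 * this file).
 */
static inline int rb_sketch_recording_allowed(int buffer_disabled,
					       int cpu_disabled)
{
	if (ring_buffer_flags != RB_BUFFERS_ON)	/* layer 1: global switch */
		return 0;
	if (buffer_disabled)			/* layer 2: whole buffer */
		return 0;
	if (cpu_disabled)			/* layer 3: this cpu's buffer */
		return 0;
	return 1;
}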
163 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
164
165 /**
166 * tracing_off_permanent - permanently disable ring buffers
167 *
168 * This function, once called, will disable all ring buffers
169 * permanently.
170 */
171 void tracing_off_permanent(void)
172 {
173 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
174 }
175
176 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
177 #define RB_ALIGNMENT 4U
178 #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
179 #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
180
181 #if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
182 # define RB_FORCE_8BYTE_ALIGNMENT 0
183 # define RB_ARCH_ALIGNMENT RB_ALIGNMENT
184 #else
185 # define RB_FORCE_8BYTE_ALIGNMENT 1
186 # define RB_ARCH_ALIGNMENT 8U
187 #endif
188
189 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
190 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
191
192 enum {
193 RB_LEN_TIME_EXTEND = 8,
194 RB_LEN_TIME_STAMP = 16,
195 };
196
197 #define skip_time_extend(event) \
198 ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
199
200 static inline int rb_null_event(struct ring_buffer_event *event)
201 {
202 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
203 }
204
205 static void rb_event_set_padding(struct ring_buffer_event *event)
206 {
207 /* padding has a NULL time_delta */
208 event->type_len = RINGBUF_TYPE_PADDING;
209 event->time_delta = 0;
210 }
211
212 static unsigned
213 rb_event_data_length(struct ring_buffer_event *event)
214 {
215 unsigned length;
216
217 if (event->type_len)
218 length = event->type_len * RB_ALIGNMENT;
219 else
220 length = event->array[0];
221 return length + RB_EVNT_HDR_SIZE;
222 }
223
224 /*
225 * Return the length of the given event. Will return
226 * the length of the time extend if the event is a
227 * time extend.
228 */
229 static inline unsigned
230 rb_event_length(struct ring_buffer_event *event)
231 {
232 switch (event->type_len) {
233 case RINGBUF_TYPE_PADDING:
234 if (rb_null_event(event))
235 /* undefined */
236 return -1;
237 return event->array[0] + RB_EVNT_HDR_SIZE;
238
239 case RINGBUF_TYPE_TIME_EXTEND:
240 return RB_LEN_TIME_EXTEND;
241
242 case RINGBUF_TYPE_TIME_STAMP:
243 return RB_LEN_TIME_STAMP;
244
245 case RINGBUF_TYPE_DATA:
246 return rb_event_data_length(event);
247 default:
248 BUG();
249 }
250 /* not hit */
251 return 0;
252 }
253
254 /*
255 * Return total length of time extend and data,
256 * or just the event length for all other events.
257 */
258 static inline unsigned
259 rb_event_ts_length(struct ring_buffer_event *event)
260 {
261 unsigned len = 0;
262
263 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
264 /* time extends include the data event after it */
265 len = RB_LEN_TIME_EXTEND;
266 event = skip_time_extend(event);
267 }
268 return len + rb_event_length(event);
269 }
270
271 /**
272 * ring_buffer_event_length - return the length of the event
273 * @event: the event to get the length of
274 *
275 * Returns the size of the data load of a data event.
276 * If the event is something other than a data event, it
277 * returns the size of the event itself. With the exception
278 * of a TIME EXTEND, where it still returns the size of the
279 * data load of the data event after it.
280 */
281 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
282 {
283 unsigned length;
284
285 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
286 event = skip_time_extend(event);
287
288 length = rb_event_length(event);
289 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
290 return length;
291 length -= RB_EVNT_HDR_SIZE;
292 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
293 length -= sizeof(event->array[0]);
294 return length;
295 }
296 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
297
298 /* inline for ring buffer fast paths */
299 static void *
300 rb_event_data(struct ring_buffer_event *event)
301 {
302 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
303 event = skip_time_extend(event);
304 BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
305 /* If length is in len field, then array[0] has the data */
306 if (event->type_len)
307 return (void *)&event->array[0];
308 /* Otherwise length is in array[0] and array[1] has the data */
309 return (void *)&event->array[1];
310 }
311
312 /**
313 * ring_buffer_event_data - return the data of the event
314 * @event: the event to get the data from
315 */
316 void *ring_buffer_event_data(struct ring_buffer_event *event)
317 {
318 return rb_event_data(event);
319 }
320 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
321
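/*
 * Illustrative use of the two exported helpers above (hypothetical
 * function, not part of this file's logic): given an event already
 * obtained from the buffer, dump its payload. print_hex_dump_bytes()
 * is just a convenient stand-in for whatever a caller does with the
 * data.
 */
static inline void rb_sketch_dump_event(struct ring_buffer_event *event)
{
	void *data = ring_buffer_event_data(event);
	unsigned len = ring_buffer_event_length(event);

	print_hex_dump_bytes("rb event: ", DUMP_PREFIX_OFFSET, data, len);
}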
322 #define for_each_buffer_cpu(buffer, cpu) \
323 for_each_cpu(cpu, buffer->cpumask)
324
325 #define TS_SHIFT 27
326 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
327 #define TS_DELTA_TEST (~TS_MASK)
328
329 /* Flag when events were overwritten */
330 #define RB_MISSED_EVENTS (1 << 31)
331 /* Missed count stored at end */
332 #define RB_MISSED_STORED (1 << 30)
333
334 struct buffer_data_page {
335 u64 time_stamp; /* page time stamp */
336 local_t commit; /* write committed index */
337 unsigned char data[]; /* data of buffer page */
338 };
339
340 /*
341 * Note, the buffer_page list must be first. The buffer pages
342 * are allocated in cache lines, which means that each buffer
343 * page will be at the beginning of a cache line, and thus
344 * the least significant bits will be zero. We use this to
345 * add flags in the list struct pointers, to make the ring buffer
346 * lockless.
347 */
348 struct buffer_page {
349 struct list_head list; /* list of buffer pages */
350 local_t write; /* index for next write */
351 unsigned read; /* index for next read */
352 local_t entries; /* entries on this page */
353 unsigned long real_end; /* real end of data */
354 struct buffer_data_page *page; /* Actual data page */
355 };
356
357 /*
358 * The buffer page counters, write and entries, must be reset
359 * atomically when crossing page boundaries. To synchronize this
360  * update, two counters are packed into the same word. One is
361  * the actual counter for the write position or count on the page.
362 *
363 * The other is a counter of updaters. Before an update happens
364 * the update partition of the counter is incremented. This will
365 * allow the updater to update the counter atomically.
366 *
367 * The counter is 20 bits, and the state data is 12.
368 */
369 #define RB_WRITE_MASK 0xfffff
370 #define RB_WRITE_INTCNT (1 << 20)
371
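/*
 * Illustrative decode of the split described above (the helper names
 * are made up; the real code open-codes the masking where it needs
 * it): the low 20 bits of bpage->write carry the write index, the
 * bits above them count nested updaters that bumped RB_WRITE_INTCNT.
 */
static inline unsigned long rb_sketch_write_index(unsigned long write)
{
	return write & RB_WRITE_MASK;	/* byte index on the page */
}

static inline unsigned long rb_sketch_write_updaters(unsigned long write)
{
	return write >> 20;		/* nested updater count */
}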
372 static void rb_init_page(struct buffer_data_page *bpage)
373 {
374 local_set(&bpage->commit, 0);
375 }
376
377 /**
378 * ring_buffer_page_len - the size of data on the page.
379 * @page: The page to read
380 *
381 * Returns the amount of data on the page, including buffer page header.
382 */
383 size_t ring_buffer_page_len(void *page)
384 {
385 return local_read(&((struct buffer_data_page *)page)->commit)
386 + BUF_PAGE_HDR_SIZE;
387 }
388
389 /*
390 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
391 * this issue out.
392 */
393 static void free_buffer_page(struct buffer_page *bpage)
394 {
395 free_page((unsigned long)bpage->page);
396 kfree(bpage);
397 }
398
399 /*
400 * We need to fit the time_stamp delta into 27 bits.
401 */
402 static inline int test_time_stamp(u64 delta)
403 {
404 if (delta & TS_DELTA_TEST)
405 return 1;
406 return 0;
407 }
408
409 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
410
411 /* Max payload is BUF_PAGE_SIZE - header (8bytes) */
412 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
413
414 int ring_buffer_print_page_header(struct trace_seq *s)
415 {
416 struct buffer_data_page field;
417 int ret;
418
419 ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
420 "offset:0;\tsize:%u;\tsigned:%u;\n",
421 (unsigned int)sizeof(field.time_stamp),
422 (unsigned int)is_signed_type(u64));
423
424 ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
425 "offset:%u;\tsize:%u;\tsigned:%u;\n",
426 (unsigned int)offsetof(typeof(field), commit),
427 (unsigned int)sizeof(field.commit),
428 (unsigned int)is_signed_type(long));
429
430 ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
431 "offset:%u;\tsize:%u;\tsigned:%u;\n",
432 (unsigned int)offsetof(typeof(field), commit),
433 1,
434 (unsigned int)is_signed_type(long));
435
436 ret = trace_seq_printf(s, "\tfield: char data;\t"
437 "offset:%u;\tsize:%u;\tsigned:%u;\n",
438 (unsigned int)offsetof(typeof(field), data),
439 (unsigned int)BUF_PAGE_SIZE,
440 (unsigned int)is_signed_type(char));
441
442 return ret;
443 }
444
445 /*
446 * head_page == tail_page && head == tail then buffer is empty.
447 */
448 struct ring_buffer_per_cpu {
449 int cpu;
450 atomic_t record_disabled;
451 struct ring_buffer *buffer;
452 raw_spinlock_t reader_lock; /* serialize readers */
453 arch_spinlock_t lock;
454 struct lock_class_key lock_key;
455 unsigned int nr_pages;
456 struct list_head *pages;
457 struct buffer_page *head_page; /* read from head */
458 struct buffer_page *tail_page; /* write to tail */
459 struct buffer_page *commit_page; /* committed pages */
460 struct buffer_page *reader_page;
461 unsigned long lost_events;
462 unsigned long last_overrun;
463 local_t entries_bytes;
464 local_t entries;
465 local_t overrun;
466 local_t commit_overrun;
467 local_t dropped_events;
468 local_t committing;
469 local_t commits;
470 unsigned long read;
471 unsigned long read_bytes;
472 u64 write_stamp;
473 u64 read_stamp;
474 /* ring buffer pages to update, > 0 to add, < 0 to remove */
475 int nr_pages_to_update;
476 struct list_head new_pages; /* new pages to add */
477 struct work_struct update_pages_work;
478 struct completion update_done;
479 };
480
481 struct ring_buffer {
482 unsigned flags;
483 int cpus;
484 atomic_t record_disabled;
485 atomic_t resize_disabled;
486 cpumask_var_t cpumask;
487
488 struct lock_class_key *reader_lock_key;
489
490 struct mutex mutex;
491
492 struct ring_buffer_per_cpu **buffers;
493
494 #ifdef CONFIG_HOTPLUG_CPU
495 struct notifier_block cpu_notify;
496 #endif
497 u64 (*clock)(void);
498 };
499
500 struct ring_buffer_iter {
501 struct ring_buffer_per_cpu *cpu_buffer;
502 unsigned long head;
503 struct buffer_page *head_page;
504 struct buffer_page *cache_reader_page;
505 unsigned long cache_read;
506 u64 read_stamp;
507 };
508
509 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
510 #define RB_WARN_ON(b, cond) \
511 ({ \
512 int _____ret = unlikely(cond); \
513 if (_____ret) { \
514 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
515 struct ring_buffer_per_cpu *__b = \
516 (void *)b; \
517 atomic_inc(&__b->buffer->record_disabled); \
518 } else \
519 atomic_inc(&b->record_disabled); \
520 WARN_ON(1); \
521 } \
522 _____ret; \
523 })
524
525 /* Up this if you want to test the TIME_EXTENTS and normalization */
526 #define DEBUG_SHIFT 0
527
528 static inline u64 rb_time_stamp(struct ring_buffer *buffer)
529 {
530 /* shift to debug/test normalization and TIME_EXTENTS */
531 return buffer->clock() << DEBUG_SHIFT;
532 }
533
534 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
535 {
536 u64 time;
537
538 preempt_disable_notrace();
539 time = rb_time_stamp(buffer);
540 preempt_enable_no_resched_notrace();
541
542 return time;
543 }
544 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
545
546 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
547 int cpu, u64 *ts)
548 {
549 /* Just stupid testing the normalize function and deltas */
550 *ts >>= DEBUG_SHIFT;
551 }
552 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
553
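/*
 * Illustrative pairing of the two exported helpers above (hypothetical
 * function, not used anywhere): read a raw buffer timestamp and
 * normalize it before reporting it to a user.
 */
static inline u64 rb_sketch_read_time(struct ring_buffer *buffer, int cpu)
{
	u64 ts = ring_buffer_time_stamp(buffer, cpu);

	ring_buffer_normalize_time_stamp(buffer, cpu, &ts);
	return ts;
}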
554 /*
555 * Making the ring buffer lockless makes things tricky.
556  * Writes only happen on the CPU that they are on, so they
557  * only need to worry about interrupts. Reads, however, can
558  * happen on any CPU.
559 *
560 * The reader page is always off the ring buffer, but when the
561 * reader finishes with a page, it needs to swap its page with
562 * a new one from the buffer. The reader needs to take from
563 * the head (writes go to the tail). But if a writer is in overwrite
564 * mode and wraps, it must push the head page forward.
565 *
566 * Here lies the problem.
567 *
568 * The reader must be careful to replace only the head page, and
569 * not another one. As described at the top of the file in the
570 * ASCII art, the reader sets its old page to point to the next
571 * page after head. It then sets the page after head to point to
572 * the old reader page. But if the writer moves the head page
573 * during this operation, the reader could end up with the tail.
574 *
575 * We use cmpxchg to help prevent this race. We also do something
576 * special with the page before head. We set the LSB to 1.
577 *
578 * When the writer must push the page forward, it will clear the
579 * bit that points to the head page, move the head, and then set
580 * the bit that points to the new head page.
581 *
582 * We also don't want an interrupt coming in and moving the head
583 * page on another writer. Thus we use the second LSB to catch
584 * that too. Thus:
585 *
586 * head->list->prev->next bit 1 bit 0
587 * ------- -------
588 * Normal page 0 0
589 * Points to head page 0 1
590 * New head page 1 0
591 *
592 * Note we can not trust the prev pointer of the head page, because:
593 *
594 * +----+ +-----+ +-----+
595 * | |------>| T |---X--->| N |
596 * | |<------| | | |
597 * +----+ +-----+ +-----+
598 * ^ ^ |
599 * | +-----+ | |
600 * +----------| R |----------+ |
601 * | |<-----------+
602 * +-----+
603 *
604 * Key: ---X--> HEAD flag set in pointer
605 * T Tail page
606 * R Reader page
607 * N Next page
608 *
609 * (see __rb_reserve_next() to see where this happens)
610 *
611 * What the above shows is that the reader just swapped out
612 * the reader page with a page in the buffer, but before it
613 * could make the new header point back to the new page added
614 * it was preempted by a writer. The writer moved forward onto
615 * the new page added by the reader and is about to move forward
616 * again.
617 *
618 * You can see, it is legitimate for the previous pointer of
619 * the head (or any page) not to point back to itself. But only
620  * temporarily.
621 */
622
623 #define RB_PAGE_NORMAL 0UL
624 #define RB_PAGE_HEAD 1UL
625 #define RB_PAGE_UPDATE 2UL
626
627
628 #define RB_FLAG_MASK 3UL
629
630 /* PAGE_MOVED is not part of the mask */
631 #define RB_PAGE_MOVED 4UL
632
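/*
 * Illustrative sketch of the pointer tagging described above (helper
 * names are made up; rb_list_head() and rb_is_head_page() below are
 * the real accessors). Buffer pages are cache-line aligned, so the
 * two low bits of a ->next pointer are free to carry these flags, and
 * a single cmpxchg can test the flag and move the pointer at once.
 */
static inline unsigned long rb_sketch_tag_ptr(struct list_head *next,
					      unsigned long flag)
{
	return ((unsigned long)next & ~RB_FLAG_MASK) | flag;
}

static inline unsigned long rb_sketch_ptr_flag(unsigned long tagged)
{
	return tagged & RB_FLAG_MASK;
}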
633 /*
634 * rb_list_head - remove any bit
635 */
636 static struct list_head *rb_list_head(struct list_head *list)
637 {
638 unsigned long val = (unsigned long)list;
639
640 return (struct list_head *)(val & ~RB_FLAG_MASK);
641 }
642
643 /*
644 * rb_is_head_page - test if the given page is the head page
645 *
646 * Because the reader may move the head_page pointer, we can
647 * not trust what the head page is (it may be pointing to
648 * the reader page). But if the next page is a header page,
649 * its flags will be non zero.
650 */
651 static inline int
652 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
653 struct buffer_page *page, struct list_head *list)
654 {
655 unsigned long val;
656
657 val = (unsigned long)list->next;
658
659 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
660 return RB_PAGE_MOVED;
661
662 return val & RB_FLAG_MASK;
663 }
664
665 /*
666 * rb_is_reader_page
667 *
668  * The unique thing about the reader page is that, if the
669 * writer is ever on it, the previous pointer never points
670 * back to the reader page.
671 */
672 static int rb_is_reader_page(struct buffer_page *page)
673 {
674 struct list_head *list = page->list.prev;
675
676 return rb_list_head(list->next) != &page->list;
677 }
678
679 /*
680 * rb_set_list_to_head - set a list_head to be pointing to head.
681 */
682 static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
683 struct list_head *list)
684 {
685 unsigned long *ptr;
686
687 ptr = (unsigned long *)&list->next;
688 *ptr |= RB_PAGE_HEAD;
689 *ptr &= ~RB_PAGE_UPDATE;
690 }
691
692 /*
693 * rb_head_page_activate - sets up head page
694 */
695 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
696 {
697 struct buffer_page *head;
698
699 head = cpu_buffer->head_page;
700 if (!head)
701 return;
702
703 /*
704 * Set the previous list pointer to have the HEAD flag.
705 */
706 rb_set_list_to_head(cpu_buffer, head->list.prev);
707 }
708
709 static void rb_list_head_clear(struct list_head *list)
710 {
711 unsigned long *ptr = (unsigned long *)&list->next;
712
713 *ptr &= ~RB_FLAG_MASK;
714 }
715
716 /*
717  * rb_head_page_deactivate - clears head page ptr (for free list)
718 */
719 static void
720 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
721 {
722 struct list_head *hd;
723
724 /* Go through the whole list and clear any pointers found. */
725 rb_list_head_clear(cpu_buffer->pages);
726
727 list_for_each(hd, cpu_buffer->pages)
728 rb_list_head_clear(hd);
729 }
730
731 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
732 struct buffer_page *head,
733 struct buffer_page *prev,
734 int old_flag, int new_flag)
735 {
736 struct list_head *list;
737 unsigned long val = (unsigned long)&head->list;
738 unsigned long ret;
739
740 list = &prev->list;
741
742 val &= ~RB_FLAG_MASK;
743
744 ret = cmpxchg((unsigned long *)&list->next,
745 val | old_flag, val | new_flag);
746
747 /* check if the reader took the page */
748 if ((ret & ~RB_FLAG_MASK) != val)
749 return RB_PAGE_MOVED;
750
751 return ret & RB_FLAG_MASK;
752 }
753
754 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
755 struct buffer_page *head,
756 struct buffer_page *prev,
757 int old_flag)
758 {
759 return rb_head_page_set(cpu_buffer, head, prev,
760 old_flag, RB_PAGE_UPDATE);
761 }
762
763 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
764 struct buffer_page *head,
765 struct buffer_page *prev,
766 int old_flag)
767 {
768 return rb_head_page_set(cpu_buffer, head, prev,
769 old_flag, RB_PAGE_HEAD);
770 }
771
772 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
773 struct buffer_page *head,
774 struct buffer_page *prev,
775 int old_flag)
776 {
777 return rb_head_page_set(cpu_buffer, head, prev,
778 old_flag, RB_PAGE_NORMAL);
779 }
780
781 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
782 struct buffer_page **bpage)
783 {
784 struct list_head *p = rb_list_head((*bpage)->list.next);
785
786 *bpage = list_entry(p, struct buffer_page, list);
787 }
788
789 static struct buffer_page *
790 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
791 {
792 struct buffer_page *head;
793 struct buffer_page *page;
794 struct list_head *list;
795 int i;
796
797 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
798 return NULL;
799
800 /* sanity check */
801 list = cpu_buffer->pages;
802 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
803 return NULL;
804
805 page = head = cpu_buffer->head_page;
806 /*
807  * It is possible that the writer moves the head behind
808  * where we started, and we miss it in one loop.
809 * A second loop should grab the header, but we'll do
810 * three loops just because I'm paranoid.
811 */
812 for (i = 0; i < 3; i++) {
813 do {
814 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
815 cpu_buffer->head_page = page;
816 return page;
817 }
818 rb_inc_page(cpu_buffer, &page);
819 } while (page != head);
820 }
821
822 RB_WARN_ON(cpu_buffer, 1);
823
824 return NULL;
825 }
826
827 static int rb_head_page_replace(struct buffer_page *old,
828 struct buffer_page *new)
829 {
830 unsigned long *ptr = (unsigned long *)&old->list.prev->next;
831 unsigned long val;
832 unsigned long ret;
833
834 val = *ptr & ~RB_FLAG_MASK;
835 val |= RB_PAGE_HEAD;
836
837 ret = cmpxchg(ptr, val, (unsigned long)&new->list);
838
839 return ret == val;
840 }
841
842 /*
843 * rb_tail_page_update - move the tail page forward
844 *
845  * Returns 1 if we moved the tail page, 0 if someone else did.
846 */
847 static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
848 struct buffer_page *tail_page,
849 struct buffer_page *next_page)
850 {
851 struct buffer_page *old_tail;
852 unsigned long old_entries;
853 unsigned long old_write;
854 int ret = 0;
855
856 /*
857 * The tail page now needs to be moved forward.
858 *
859 * We need to reset the tail page, but without messing
860 * with possible erasing of data brought in by interrupts
861 * that have moved the tail page and are currently on it.
862 *
863 * We add a counter to the write field to denote this.
864 */
865 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
866 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
867
868 /*
869 * Just make sure we have seen our old_write and synchronize
870 * with any interrupts that come in.
871 */
872 barrier();
873
874 /*
875 * If the tail page is still the same as what we think
876 * it is, then it is up to us to update the tail
877 * pointer.
878 */
879 if (tail_page == cpu_buffer->tail_page) {
880 /* Zero the write counter */
881 unsigned long val = old_write & ~RB_WRITE_MASK;
882 unsigned long eval = old_entries & ~RB_WRITE_MASK;
883
884 /*
885 * This will only succeed if an interrupt did
886 * not come in and change it. In which case, we
887 * do not want to modify it.
888 *
889 * We add (void) to let the compiler know that we do not care
890 * about the return value of these functions. We use the
891 * cmpxchg to only update if an interrupt did not already
892 * do it for us. If the cmpxchg fails, we don't care.
893 */
894 (void)local_cmpxchg(&next_page->write, old_write, val);
895 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
896
897 /*
898 * No need to worry about races with clearing out the commit.
899  * It can only increment when a commit takes place. But that
900  * only happens in the outermost nested commit.
901 */
902 local_set(&next_page->page->commit, 0);
903
904 old_tail = cmpxchg(&cpu_buffer->tail_page,
905 tail_page, next_page);
906
907 if (old_tail == tail_page)
908 ret = 1;
909 }
910
911 return ret;
912 }
913
914 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
915 struct buffer_page *bpage)
916 {
917 unsigned long val = (unsigned long)bpage;
918
919 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
920 return 1;
921
922 return 0;
923 }
924
925 /**
926 * rb_check_list - make sure a pointer to a list has the last bits zero
927 */
928 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
929 struct list_head *list)
930 {
931 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
932 return 1;
933 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
934 return 1;
935 return 0;
936 }
937
938 /**
939  * rb_check_pages - integrity check of buffer pages
940 * @cpu_buffer: CPU buffer with pages to test
941 *
942 * As a safety measure we check to make sure the data pages have not
943 * been corrupted.
944 */
945 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
946 {
947 struct list_head *head = cpu_buffer->pages;
948 struct buffer_page *bpage, *tmp;
949
950 /* Reset the head page if it exists */
951 if (cpu_buffer->head_page)
952 rb_set_head_page(cpu_buffer);
953
954 rb_head_page_deactivate(cpu_buffer);
955
956 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
957 return -1;
958 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
959 return -1;
960
961 if (rb_check_list(cpu_buffer, head))
962 return -1;
963
964 list_for_each_entry_safe(bpage, tmp, head, list) {
965 if (RB_WARN_ON(cpu_buffer,
966 bpage->list.next->prev != &bpage->list))
967 return -1;
968 if (RB_WARN_ON(cpu_buffer,
969 bpage->list.prev->next != &bpage->list))
970 return -1;
971 if (rb_check_list(cpu_buffer, &bpage->list))
972 return -1;
973 }
974
975 rb_head_page_activate(cpu_buffer);
976
977 return 0;
978 }
979
980 static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
981 {
982 int i;
983 struct buffer_page *bpage, *tmp;
984
985 for (i = 0; i < nr_pages; i++) {
986 struct page *page;
987 /*
988 * __GFP_NORETRY flag makes sure that the allocation fails
989 * gracefully without invoking oom-killer and the system is
990 * not destabilized.
991 */
992 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
993 GFP_KERNEL | __GFP_NORETRY,
994 cpu_to_node(cpu));
995 if (!bpage)
996 goto free_pages;
997
998 list_add(&bpage->list, pages);
999
1000 page = alloc_pages_node(cpu_to_node(cpu),
1001 GFP_KERNEL | __GFP_NORETRY, 0);
1002 if (!page)
1003 goto free_pages;
1004 bpage->page = page_address(page);
1005 rb_init_page(bpage->page);
1006 }
1007
1008 return 0;
1009
1010 free_pages:
1011 list_for_each_entry_safe(bpage, tmp, pages, list) {
1012 list_del_init(&bpage->list);
1013 free_buffer_page(bpage);
1014 }
1015
1016 return -ENOMEM;
1017 }
1018
1019 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1020 unsigned nr_pages)
1021 {
1022 LIST_HEAD(pages);
1023
1024 WARN_ON(!nr_pages);
1025
1026 if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
1027 return -ENOMEM;
1028
1029 /*
1030 * The ring buffer page list is a circular list that does not
1031 * start and end with a list head. All page list items point to
1032 * other pages.
1033 */
1034 cpu_buffer->pages = pages.next;
1035 list_del(&pages);
1036
1037 cpu_buffer->nr_pages = nr_pages;
1038
1039 rb_check_pages(cpu_buffer);
1040
1041 return 0;
1042 }
1043
1044 static struct ring_buffer_per_cpu *
1045 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
1046 {
1047 struct ring_buffer_per_cpu *cpu_buffer;
1048 struct buffer_page *bpage;
1049 struct page *page;
1050 int ret;
1051
1052 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1053 GFP_KERNEL, cpu_to_node(cpu));
1054 if (!cpu_buffer)
1055 return NULL;
1056
1057 cpu_buffer->cpu = cpu;
1058 cpu_buffer->buffer = buffer;
1059 raw_spin_lock_init(&cpu_buffer->reader_lock);
1060 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1061 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1062 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1063 init_completion(&cpu_buffer->update_done);
1064
1065 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1066 GFP_KERNEL, cpu_to_node(cpu));
1067 if (!bpage)
1068 goto fail_free_buffer;
1069
1070 rb_check_bpage(cpu_buffer, bpage);
1071
1072 cpu_buffer->reader_page = bpage;
1073 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1074 if (!page)
1075 goto fail_free_reader;
1076 bpage->page = page_address(page);
1077 rb_init_page(bpage->page);
1078
1079 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1080 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1081
1082 ret = rb_allocate_pages(cpu_buffer, nr_pages);
1083 if (ret < 0)
1084 goto fail_free_reader;
1085
1086 cpu_buffer->head_page
1087 = list_entry(cpu_buffer->pages, struct buffer_page, list);
1088 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1089
1090 rb_head_page_activate(cpu_buffer);
1091
1092 return cpu_buffer;
1093
1094 fail_free_reader:
1095 free_buffer_page(cpu_buffer->reader_page);
1096
1097 fail_free_buffer:
1098 kfree(cpu_buffer);
1099 return NULL;
1100 }
1101
1102 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1103 {
1104 struct list_head *head = cpu_buffer->pages;
1105 struct buffer_page *bpage, *tmp;
1106
1107 free_buffer_page(cpu_buffer->reader_page);
1108
1109 rb_head_page_deactivate(cpu_buffer);
1110
1111 if (head) {
1112 list_for_each_entry_safe(bpage, tmp, head, list) {
1113 list_del_init(&bpage->list);
1114 free_buffer_page(bpage);
1115 }
1116 bpage = list_entry(head, struct buffer_page, list);
1117 free_buffer_page(bpage);
1118 }
1119
1120 kfree(cpu_buffer);
1121 }
1122
1123 #ifdef CONFIG_HOTPLUG_CPU
1124 static int rb_cpu_notify(struct notifier_block *self,
1125 unsigned long action, void *hcpu);
1126 #endif
1127
1128 /**
1129 * ring_buffer_alloc - allocate a new ring_buffer
1130 * @size: the size in bytes per cpu that is needed.
1131 * @flags: attributes to set for the ring buffer.
1132 *
1133 * Currently the only flag that is available is the RB_FL_OVERWRITE
1134 * flag. This flag means that the buffer will overwrite old data
1135 * when the buffer wraps. If this flag is not set, the buffer will
1136 * drop data when the tail hits the head.
1137 */
1138 struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1139 struct lock_class_key *key)
1140 {
1141 struct ring_buffer *buffer;
1142 int bsize;
1143 int cpu, nr_pages;
1144
1145 /* keep it in its own cache line */
1146 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1147 GFP_KERNEL);
1148 if (!buffer)
1149 return NULL;
1150
1151 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1152 goto fail_free_buffer;
1153
1154 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1155 buffer->flags = flags;
1156 buffer->clock = trace_clock_local;
1157 buffer->reader_lock_key = key;
1158
1159 /* need at least two pages */
1160 if (nr_pages < 2)
1161 nr_pages = 2;
1162
1163 /*
1164 * In case of non-hotplug cpu, if the ring-buffer is allocated
1165 * in early initcall, it will not be notified of secondary cpus.
1166  * In that case, we need to allocate for all possible cpus.
1167 */
1168 #ifdef CONFIG_HOTPLUG_CPU
1169 get_online_cpus();
1170 cpumask_copy(buffer->cpumask, cpu_online_mask);
1171 #else
1172 cpumask_copy(buffer->cpumask, cpu_possible_mask);
1173 #endif
1174 buffer->cpus = nr_cpu_ids;
1175
1176 bsize = sizeof(void *) * nr_cpu_ids;
1177 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1178 GFP_KERNEL);
1179 if (!buffer->buffers)
1180 goto fail_free_cpumask;
1181
1182 for_each_buffer_cpu(buffer, cpu) {
1183 buffer->buffers[cpu] =
1184 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1185 if (!buffer->buffers[cpu])
1186 goto fail_free_buffers;
1187 }
1188
1189 #ifdef CONFIG_HOTPLUG_CPU
1190 buffer->cpu_notify.notifier_call = rb_cpu_notify;
1191 buffer->cpu_notify.priority = 0;
1192 register_cpu_notifier(&buffer->cpu_notify);
1193 #endif
1194
1195 put_online_cpus();
1196 mutex_init(&buffer->mutex);
1197
1198 return buffer;
1199
1200 fail_free_buffers:
1201 for_each_buffer_cpu(buffer, cpu) {
1202 if (buffer->buffers[cpu])
1203 rb_free_cpu_buffer(buffer->buffers[cpu]);
1204 }
1205 kfree(buffer->buffers);
1206
1207 fail_free_cpumask:
1208 free_cpumask_var(buffer->cpumask);
1209 put_online_cpus();
1210
1211 fail_free_buffer:
1212 kfree(buffer);
1213 return NULL;
1214 }
1215 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
1216
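/*
 * Illustrative setup using the allocator above (hypothetical function;
 * real callers live in places such as kernel/trace/trace.c). The
 * ring_buffer_alloc() wrapper macro from <linux/ring_buffer.h>
 * supplies the lock class key for us.
 */
static inline struct ring_buffer *rb_sketch_setup(void)
{
	struct ring_buffer *buffer;

	/* one megabyte per cpu, overwrite the oldest data when full */
	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
	if (!buffer)
		return NULL;

	/* optionally switch to the cross-cpu monotonic trace clock */
	ring_buffer_set_clock(buffer, trace_clock_global);

	return buffer;
}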
1217 /**
1218 * ring_buffer_free - free a ring buffer.
1219 * @buffer: the buffer to free.
1220 */
1221 void
1222 ring_buffer_free(struct ring_buffer *buffer)
1223 {
1224 int cpu;
1225
1226 get_online_cpus();
1227
1228 #ifdef CONFIG_HOTPLUG_CPU
1229 unregister_cpu_notifier(&buffer->cpu_notify);
1230 #endif
1231
1232 for_each_buffer_cpu(buffer, cpu)
1233 rb_free_cpu_buffer(buffer->buffers[cpu]);
1234
1235 put_online_cpus();
1236
1237 kfree(buffer->buffers);
1238 free_cpumask_var(buffer->cpumask);
1239
1240 kfree(buffer);
1241 }
1242 EXPORT_SYMBOL_GPL(ring_buffer_free);
1243
1244 void ring_buffer_set_clock(struct ring_buffer *buffer,
1245 u64 (*clock)(void))
1246 {
1247 buffer->clock = clock;
1248 }
1249
1250 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1251
1252 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1253 {
1254 return local_read(&bpage->entries) & RB_WRITE_MASK;
1255 }
1256
1257 static inline unsigned long rb_page_write(struct buffer_page *bpage)
1258 {
1259 return local_read(&bpage->write) & RB_WRITE_MASK;
1260 }
1261
1262 static int
1263 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
1264 {
1265 struct list_head *tail_page, *to_remove, *next_page;
1266 struct buffer_page *to_remove_page, *tmp_iter_page;
1267 struct buffer_page *last_page, *first_page;
1268 unsigned int nr_removed;
1269 unsigned long head_bit;
1270 int page_entries;
1271
1272 head_bit = 0;
1273
1274 raw_spin_lock_irq(&cpu_buffer->reader_lock);
1275 atomic_inc(&cpu_buffer->record_disabled);
1276 /*
1277 * We don't race with the readers since we have acquired the reader
1278 * lock. We also don't race with writers after disabling recording.
1279 * This makes it easy to figure out the first and the last page to be
1280 * removed from the list. We unlink all the pages in between including
1281 * the first and last pages. This is done in a busy loop so that we
1282 * lose the least number of traces.
1283 * The pages are freed after we restart recording and unlock readers.
1284 */
1285 tail_page = &cpu_buffer->tail_page->list;
1286
1287 /*
1288  * The tail page might be on the reader page; in that case we remove
1289  * the next page from the ring buffer instead
1290 */
1291 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1292 tail_page = rb_list_head(tail_page->next);
1293 to_remove = tail_page;
1294
1295 /* start of pages to remove */
1296 first_page = list_entry(rb_list_head(to_remove->next),
1297 struct buffer_page, list);
1298
1299 for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1300 to_remove = rb_list_head(to_remove)->next;
1301 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1302 }
1303
1304 next_page = rb_list_head(to_remove)->next;
1305
1306 /*
1307 * Now we remove all pages between tail_page and next_page.
1308 * Make sure that we have head_bit value preserved for the
1309 * next page
1310 */
1311 tail_page->next = (struct list_head *)((unsigned long)next_page |
1312 head_bit);
1313 next_page = rb_list_head(next_page);
1314 next_page->prev = tail_page;
1315
1316 /* make sure pages points to a valid page in the ring buffer */
1317 cpu_buffer->pages = next_page;
1318
1319 /* update head page */
1320 if (head_bit)
1321 cpu_buffer->head_page = list_entry(next_page,
1322 struct buffer_page, list);
1323
1324 /*
1325 * change read pointer to make sure any read iterators reset
1326 * themselves
1327 */
1328 cpu_buffer->read = 0;
1329
1330 /* pages are removed, resume tracing and then free the pages */
1331 atomic_dec(&cpu_buffer->record_disabled);
1332 raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1333
1334 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1335
1336 /* last buffer page to remove */
1337 last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1338 list);
1339 tmp_iter_page = first_page;
1340
1341 do {
1342 to_remove_page = tmp_iter_page;
1343 rb_inc_page(cpu_buffer, &tmp_iter_page);
1344
1345 /* update the counters */
1346 page_entries = rb_page_entries(to_remove_page);
1347 if (page_entries) {
1348 /*
1349 * If something was added to this page, it was full
1350 * since it is not the tail page. So we deduct the
1351 * bytes consumed in ring buffer from here.
1352 * Increment overrun to account for the lost events.
1353 */
1354 local_add(page_entries, &cpu_buffer->overrun);
1355 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1356 }
1357
1358 /*
1359 * We have already removed references to this list item, just
1360 * free up the buffer_page and its page
1361 */
1362 free_buffer_page(to_remove_page);
1363 nr_removed--;
1364
1365 } while (to_remove_page != last_page);
1366
1367 RB_WARN_ON(cpu_buffer, nr_removed);
1368
1369 return nr_removed == 0;
1370 }
1371
1372 static int
1373 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1374 {
1375 struct list_head *pages = &cpu_buffer->new_pages;
1376 int retries, success;
1377
1378 raw_spin_lock_irq(&cpu_buffer->reader_lock);
1379 /*
1380 * We are holding the reader lock, so the reader page won't be swapped
1381 * in the ring buffer. Now we are racing with the writer trying to
1382 * move head page and the tail page.
1383 * We are going to adapt the reader page update process where:
1384 * 1. We first splice the start and end of list of new pages between
1385 * the head page and its previous page.
1386 * 2. We cmpxchg the prev_page->next to point from head page to the
1387 * start of new pages list.
1388 * 3. Finally, we update the head->prev to the end of new list.
1389 *
1390 * We will try this process 10 times, to make sure that we don't keep
1391 * spinning.
1392 */
1393 retries = 10;
1394 success = 0;
1395 while (retries--) {
1396 struct list_head *head_page, *prev_page, *r;
1397 struct list_head *last_page, *first_page;
1398 struct list_head *head_page_with_bit;
1399
1400 head_page = &rb_set_head_page(cpu_buffer)->list;
1401 if (!head_page)
1402 break;
1403 prev_page = head_page->prev;
1404
1405 first_page = pages->next;
1406 last_page = pages->prev;
1407
1408 head_page_with_bit = (struct list_head *)
1409 ((unsigned long)head_page | RB_PAGE_HEAD);
1410
1411 last_page->next = head_page_with_bit;
1412 first_page->prev = prev_page;
1413
1414 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1415
1416 if (r == head_page_with_bit) {
1417 /*
1418 * yay, we replaced the page pointer to our new list,
1419  * now we just have to update the head page's prev
1420  * pointer to point to the end of the list
1421 */
1422 head_page->prev = last_page;
1423 success = 1;
1424 break;
1425 }
1426 }
1427
1428 if (success)
1429 INIT_LIST_HEAD(pages);
1430 /*
1431  * If we weren't successful in adding the new pages, warn and stop
1432 * tracing
1433 */
1434 RB_WARN_ON(cpu_buffer, !success);
1435 raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1436
1437 /* free pages if they weren't inserted */
1438 if (!success) {
1439 struct buffer_page *bpage, *tmp;
1440 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1441 list) {
1442 list_del_init(&bpage->list);
1443 free_buffer_page(bpage);
1444 }
1445 }
1446 return success;
1447 }
1448
1449 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1450 {
1451 int success;
1452
1453 if (cpu_buffer->nr_pages_to_update > 0)
1454 success = rb_insert_pages(cpu_buffer);
1455 else
1456 success = rb_remove_pages(cpu_buffer,
1457 -cpu_buffer->nr_pages_to_update);
1458
1459 if (success)
1460 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1461 }
1462
1463 static void update_pages_handler(struct work_struct *work)
1464 {
1465 struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1466 struct ring_buffer_per_cpu, update_pages_work);
1467 rb_update_pages(cpu_buffer);
1468 complete(&cpu_buffer->update_done);
1469 }
1470
1471 /**
1472 * ring_buffer_resize - resize the ring buffer
1473 * @buffer: the buffer to resize.
1474 * @size: the new size.
1475 *
1476 * Minimum size is 2 * BUF_PAGE_SIZE.
1477 *
1478  * Returns the new size on success and < 0 on failure.
1479 */
1480 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1481 int cpu_id)
1482 {
1483 struct ring_buffer_per_cpu *cpu_buffer;
1484 unsigned nr_pages;
1485 int cpu, err = 0;
1486
1487 /*
1488 * Always succeed at resizing a non-existent buffer:
1489 */
1490 if (!buffer)
1491 return size;
1492
1493 /* Make sure the requested buffer exists */
1494 if (cpu_id != RING_BUFFER_ALL_CPUS &&
1495 !cpumask_test_cpu(cpu_id, buffer->cpumask))
1496 return size;
1497
1498 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1499 size *= BUF_PAGE_SIZE;
1500
1501 /* we need a minimum of two pages */
1502 if (size < BUF_PAGE_SIZE * 2)
1503 size = BUF_PAGE_SIZE * 2;
1504
1505 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1506
1507 /*
1508 * Don't succeed if resizing is disabled, as a reader might be
1509 * manipulating the ring buffer and is expecting a sane state while
1510 * this is true.
1511 */
1512 if (atomic_read(&buffer->resize_disabled))
1513 return -EBUSY;
1514
1515 /* prevent another thread from changing buffer sizes */
1516 mutex_lock(&buffer->mutex);
1517
1518 if (cpu_id == RING_BUFFER_ALL_CPUS) {
1519 /* calculate the pages to update */
1520 for_each_buffer_cpu(buffer, cpu) {
1521 cpu_buffer = buffer->buffers[cpu];
1522
1523 cpu_buffer->nr_pages_to_update = nr_pages -
1524 cpu_buffer->nr_pages;
1525 /*
1526  * nothing more to do when removing pages or when there is no update
1527 */
1528 if (cpu_buffer->nr_pages_to_update <= 0)
1529 continue;
1530 /*
1531 * to add pages, make sure all new pages can be
1532 * allocated without receiving ENOMEM
1533 */
1534 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1535 if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1536 &cpu_buffer->new_pages, cpu)) {
1537 /* not enough memory for new pages */
1538 err = -ENOMEM;
1539 goto out_err;
1540 }
1541 }
1542
1543 get_online_cpus();
1544 /*
1545 * Fire off all the required work handlers
1546 * We can't schedule on offline CPUs, but it's not necessary
1547 * since we can change their buffer sizes without any race.
1548 */
1549 for_each_buffer_cpu(buffer, cpu) {
1550 cpu_buffer = buffer->buffers[cpu];
1551 if (!cpu_buffer->nr_pages_to_update)
1552 continue;
1553
1554 if (cpu_online(cpu))
1555 schedule_work_on(cpu,
1556 &cpu_buffer->update_pages_work);
1557 else
1558 rb_update_pages(cpu_buffer);
1559 }
1560
1561 /* wait for all the updates to complete */
1562 for_each_buffer_cpu(buffer, cpu) {
1563 cpu_buffer = buffer->buffers[cpu];
1564 if (!cpu_buffer->nr_pages_to_update)
1565 continue;
1566
1567 if (cpu_online(cpu))
1568 wait_for_completion(&cpu_buffer->update_done);
1569 cpu_buffer->nr_pages_to_update = 0;
1570 }
1571
1572 put_online_cpus();
1573 } else {
1574  /* Make sure this CPU has been initialized */
1575 if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
1576 goto out;
1577
1578 cpu_buffer = buffer->buffers[cpu_id];
1579
1580 if (nr_pages == cpu_buffer->nr_pages)
1581 goto out;
1582
1583 cpu_buffer->nr_pages_to_update = nr_pages -
1584 cpu_buffer->nr_pages;
1585
1586 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1587 if (cpu_buffer->nr_pages_to_update > 0 &&
1588 __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1589 &cpu_buffer->new_pages, cpu_id)) {
1590 err = -ENOMEM;
1591 goto out_err;
1592 }
1593
1594 get_online_cpus();
1595
1596 if (cpu_online(cpu_id)) {
1597 schedule_work_on(cpu_id,
1598 &cpu_buffer->update_pages_work);
1599 wait_for_completion(&cpu_buffer->update_done);
1600 } else
1601 rb_update_pages(cpu_buffer);
1602
1603 cpu_buffer->nr_pages_to_update = 0;
1604 put_online_cpus();
1605 }
1606
1607 out:
1608 /*
1609 * The ring buffer resize can happen with the ring buffer
1610 * enabled, so that the update disturbs the tracing as little
1611 * as possible. But if the buffer is disabled, we do not need
1612 * to worry about that, and we can take the time to verify
1613 * that the buffer is not corrupt.
1614 */
1615 if (atomic_read(&buffer->record_disabled)) {
1616 atomic_inc(&buffer->record_disabled);
1617 /*
1618 * Even though the buffer was disabled, we must make sure
1619 * that it is truly disabled before calling rb_check_pages.
1620 * There could have been a race between checking
1621  * record_disabled and incrementing it.
1622 */
1623 synchronize_sched();
1624 for_each_buffer_cpu(buffer, cpu) {
1625 cpu_buffer = buffer->buffers[cpu];
1626 rb_check_pages(cpu_buffer);
1627 }
1628 atomic_dec(&buffer->record_disabled);
1629 }
1630
1631 mutex_unlock(&buffer->mutex);
1632 return size;
1633
1634 out_err:
1635 for_each_buffer_cpu(buffer, cpu) {
1636 struct buffer_page *bpage, *tmp;
1637
1638 cpu_buffer = buffer->buffers[cpu];
1639 cpu_buffer->nr_pages_to_update = 0;
1640
1641 if (list_empty(&cpu_buffer->new_pages))
1642 continue;
1643
1644 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1645 list) {
1646 list_del_init(&bpage->list);
1647 free_buffer_page(bpage);
1648 }
1649 }
1650 mutex_unlock(&buffer->mutex);
1651 return err;
1652 }
1653 EXPORT_SYMBOL_GPL(ring_buffer_resize);
1654
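/*
 * Illustrative call into the resize path above (hypothetical function,
 * not used anywhere): grow every per-cpu buffer to two megabytes.
 * RING_BUFFER_ALL_CPUS comes from <linux/ring_buffer.h>.
 */
static inline int rb_sketch_grow(struct ring_buffer *buffer)
{
	int ret;

	ret = ring_buffer_resize(buffer, 2 << 20, RING_BUFFER_ALL_CPUS);

	/* ring_buffer_resize() returns the (rounded) new size or -errno */
	return ret < 0 ? ret : 0;
}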
1655 void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1656 {
1657 mutex_lock(&buffer->mutex);
1658 if (val)
1659 buffer->flags |= RB_FL_OVERWRITE;
1660 else
1661 buffer->flags &= ~RB_FL_OVERWRITE;
1662 mutex_unlock(&buffer->mutex);
1663 }
1664 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1665
1666 static inline void *
1667 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1668 {
1669 return bpage->data + index;
1670 }
1671
1672 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1673 {
1674 return bpage->page->data + index;
1675 }
1676
1677 static inline struct ring_buffer_event *
1678 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1679 {
1680 return __rb_page_index(cpu_buffer->reader_page,
1681 cpu_buffer->reader_page->read);
1682 }
1683
1684 static inline struct ring_buffer_event *
1685 rb_iter_head_event(struct ring_buffer_iter *iter)
1686 {
1687 return __rb_page_index(iter->head_page, iter->head);
1688 }
1689
1690 static inline unsigned rb_page_commit(struct buffer_page *bpage)
1691 {
1692 return local_read(&bpage->page->commit);
1693 }
1694
1695 /* Size is determined by what has been committed */
1696 static inline unsigned rb_page_size(struct buffer_page *bpage)
1697 {
1698 return rb_page_commit(bpage);
1699 }
1700
1701 static inline unsigned
1702 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1703 {
1704 return rb_page_commit(cpu_buffer->commit_page);
1705 }
1706
1707 static inline unsigned
1708 rb_event_index(struct ring_buffer_event *event)
1709 {
1710 unsigned long addr = (unsigned long)event;
1711
1712 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1713 }
1714
1715 static inline int
1716 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1717 struct ring_buffer_event *event)
1718 {
1719 unsigned long addr = (unsigned long)event;
1720 unsigned long index;
1721
1722 index = rb_event_index(event);
1723 addr &= PAGE_MASK;
1724
1725 return cpu_buffer->commit_page->page == (void *)addr &&
1726 rb_commit_index(cpu_buffer) == index;
1727 }
1728
1729 static void
1730 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1731 {
1732 unsigned long max_count;
1733
1734 /*
1735 * We only race with interrupts and NMIs on this CPU.
1736 * If we own the commit event, then we can commit
1737 * all others that interrupted us, since the interruptions
1738 * are in stack format (they finish before they come
1739 * back to us). This allows us to do a simple loop to
1740 * assign the commit to the tail.
1741 */
1742 again:
1743 max_count = cpu_buffer->nr_pages * 100;
1744
1745 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1746 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1747 return;
1748 if (RB_WARN_ON(cpu_buffer,
1749 rb_is_reader_page(cpu_buffer->tail_page)))
1750 return;
1751 local_set(&cpu_buffer->commit_page->page->commit,
1752 rb_page_write(cpu_buffer->commit_page));
1753 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1754 cpu_buffer->write_stamp =
1755 cpu_buffer->commit_page->page->time_stamp;
1756 /* add barrier to keep gcc from optimizing too much */
1757 barrier();
1758 }
1759 while (rb_commit_index(cpu_buffer) !=
1760 rb_page_write(cpu_buffer->commit_page)) {
1761
1762 local_set(&cpu_buffer->commit_page->page->commit,
1763 rb_page_write(cpu_buffer->commit_page));
1764 RB_WARN_ON(cpu_buffer,
1765 local_read(&cpu_buffer->commit_page->page->commit) &
1766 ~RB_WRITE_MASK);
1767 barrier();
1768 }
1769
1770 /* again, keep gcc from optimizing */
1771 barrier();
1772
1773 /*
1774 * If an interrupt came in just after the first while loop
1775 * and pushed the tail page forward, we will be left with
1776 * a dangling commit that will never go forward.
1777 */
1778 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1779 goto again;
1780 }
1781
1782 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1783 {
1784 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1785 cpu_buffer->reader_page->read = 0;
1786 }
1787
1788 static void rb_inc_iter(struct ring_buffer_iter *iter)
1789 {
1790 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1791
1792 /*
1793 * The iterator could be on the reader page (it starts there).
1794 * But the head could have moved, since the reader was
1795 * found. Check for this case and assign the iterator
1796 * to the head page instead of next.
1797 */
1798 if (iter->head_page == cpu_buffer->reader_page)
1799 iter->head_page = rb_set_head_page(cpu_buffer);
1800 else
1801 rb_inc_page(cpu_buffer, &iter->head_page);
1802
1803 iter->read_stamp = iter->head_page->page->time_stamp;
1804 iter->head = 0;
1805 }
1806
1807 /* Slow path, do not inline */
1808 static noinline struct ring_buffer_event *
1809 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
1810 {
1811 event->type_len = RINGBUF_TYPE_TIME_EXTEND;
1812
1813 /* Not the first event on the page? */
1814 if (rb_event_index(event)) {
1815 event->time_delta = delta & TS_MASK;
1816 event->array[0] = delta >> TS_SHIFT;
1817 } else {
1818 /* nope, just zero it */
1819 event->time_delta = 0;
1820 event->array[0] = 0;
1821 }
1822
1823 return skip_time_extend(event);
1824 }
1825
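/*
 * Worked example for the split above: a delta of 0x12345678 does not
 * fit in the 27-bit time_delta field (TS_DELTA_TEST catches it), so it
 * is stored across the time extend event as
 *
 *	event->time_delta = 0x12345678 & TS_MASK	= 0x2345678
 *	event->array[0]   = 0x12345678 >> TS_SHIFT	= 0x2
 *
 * and the read side reassembles it as
 * (array[0] << TS_SHIFT) + time_delta.
 */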
1826 /**
1827 * rb_update_event - update event type and data
1828  * @event: the event to update
1829 * @type: the type of event
1830 * @length: the size of the event field in the ring buffer
1831 *
1832 * Update the type and data fields of the event. The length
1833 * is the actual size that is written to the ring buffer,
1834 * and with this, we can determine what to place into the
1835 * data field.
1836 */
1837 static void
1838 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
1839 struct ring_buffer_event *event, unsigned length,
1840 int add_timestamp, u64 delta)
1841 {
1842 /* Only a commit updates the timestamp */
1843 if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
1844 delta = 0;
1845
1846 /*
1847 * If we need to add a timestamp, then we
1848  * add it to the start of the reserved space.
1849 */
1850 if (unlikely(add_timestamp)) {
1851 event = rb_add_time_stamp(event, delta);
1852 length -= RB_LEN_TIME_EXTEND;
1853 delta = 0;
1854 }
1855
1856 event->time_delta = delta;
1857 length -= RB_EVNT_HDR_SIZE;
1858 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
1859 event->type_len = 0;
1860 event->array[0] = length;
1861 } else
1862 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
1863 }
1864
1865 /*
1866 * rb_handle_head_page - writer hit the head page
1867 *
1868 * Returns: +1 to retry page
1869 * 0 to continue
1870 * -1 on error
1871 */
1872 static int
1873 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1874 struct buffer_page *tail_page,
1875 struct buffer_page *next_page)
1876 {
1877 struct buffer_page *new_head;
1878 int entries;
1879 int type;
1880 int ret;
1881
1882 entries = rb_page_entries(next_page);
1883
1884 /*
1885 * The hard part is here. We need to move the head
1886 * forward, and protect against both readers on
1887 * other CPUs and writers coming in via interrupts.
1888 */
1889 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1890 RB_PAGE_HEAD);
1891
1892 /*
1893 * type can be one of four:
1894 * NORMAL - an interrupt already moved it for us
1895 * HEAD - we are the first to get here.
1896 * UPDATE - we are the interrupt interrupting
1897 * a current move.
1898 * MOVED - a reader on another CPU moved the next
1899 * pointer to its reader page. Give up
1900 * and try again.
1901 */
1902
1903 switch (type) {
1904 case RB_PAGE_HEAD:
1905 /*
1906 * We changed the head to UPDATE, thus
1907 * it is our responsibility to update
1908 * the counters.
1909 */
1910 local_add(entries, &cpu_buffer->overrun);
1911 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1912
1913 /*
1914 * The entries will be zeroed out when we move the
1915 * tail page.
1916 */
1917
1918 /* still more to do */
1919 break;
1920
1921 case RB_PAGE_UPDATE:
1922 /*
1923  * This is an interrupt that interrupted the
1924 * previous update. Still more to do.
1925 */
1926 break;
1927 case RB_PAGE_NORMAL:
1928 /*
1929 * An interrupt came in before the update
1930 * and processed this for us.
1931 * Nothing left to do.
1932 */
1933 return 1;
1934 case RB_PAGE_MOVED:
1935 /*
1936 * The reader is on another CPU and just did
1937 * a swap with our next_page.
1938 * Try again.
1939 */
1940 return 1;
1941 default:
1942 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1943 return -1;
1944 }
1945
1946 /*
1947 * Now that we are here, the old head pointer is
1948 * set to UPDATE. This will keep the reader from
1949 * swapping the head page with the reader page.
1950 * The reader (on another CPU) will spin till
1951 * we are finished.
1952 *
1953 * We just need to protect against interrupts
1954 * doing the job. We will set the next pointer
1955 * to HEAD. After that, we set the old pointer
1956 * to NORMAL, but only if it was HEAD before;
1957 * otherwise we are an interrupt, and only
1958 * want the outer most commit to reset it.
1959 */
1960 new_head = next_page;
1961 rb_inc_page(cpu_buffer, &new_head);
1962
1963 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1964 RB_PAGE_NORMAL);
1965
1966 /*
1967 * Valid returns are:
1968 * HEAD - an interrupt came in and already set it.
1969 * NORMAL - One of two things:
1970 * 1) We really set it.
1971 * 2) A bunch of interrupts came in and moved
1972 * the page forward again.
1973 */
1974 switch (ret) {
1975 case RB_PAGE_HEAD:
1976 case RB_PAGE_NORMAL:
1977 /* OK */
1978 break;
1979 default:
1980 RB_WARN_ON(cpu_buffer, 1);
1981 return -1;
1982 }
1983
1984 /*
1985 * It is possible that an interrupt came in,
1986 * set the head up, then more interrupts came in
1987 * and moved it again. When we get back here,
1988 * the page would have been set to NORMAL but we
1989 * just set it back to HEAD.
1990 *
1991 * How do you detect this? Well, if that happened
1992 * the tail page would have moved.
1993 */
1994 if (ret == RB_PAGE_NORMAL) {
1995 /*
1996 * If the tail had moved past next, then we need
1997 * to reset the pointer.
1998 */
1999 if (cpu_buffer->tail_page != tail_page &&
2000 cpu_buffer->tail_page != next_page)
2001 rb_head_page_set_normal(cpu_buffer, new_head,
2002 next_page,
2003 RB_PAGE_HEAD);
2004 }
2005
2006 /*
2007 * If this was the outer most commit (the one that
2008 * changed the original pointer from HEAD to UPDATE),
2009 * then it is up to us to reset it to NORMAL.
2010 */
2011 if (type == RB_PAGE_HEAD) {
2012 ret = rb_head_page_set_normal(cpu_buffer, next_page,
2013 tail_page,
2014 RB_PAGE_UPDATE);
2015 if (RB_WARN_ON(cpu_buffer,
2016 ret != RB_PAGE_UPDATE))
2017 return -1;
2018 }
2019
2020 return 0;
2021 }
2022
2023 static unsigned rb_calculate_event_length(unsigned length)
2024 {
2025 struct ring_buffer_event event; /* Used only for sizeof array */
2026
2027 /* zero length can cause confusions */
2028 if (!length)
2029 length = 1;
2030
2031 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2032 length += sizeof(event.array[0]);
2033
2034 length += RB_EVNT_HDR_SIZE;
2035 length = ALIGN(length, RB_ARCH_ALIGNMENT);
2036
2037 return length;
2038 }
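
/*
 * Worked example for the sizing above (a sketch, assuming the common
 * configuration where RB_FORCE_8BYTE_ALIGNMENT is not set, RB_EVNT_HDR_SIZE
 * is 4 and RB_ARCH_ALIGNMENT is 4): a request for 6 bytes of data is not
 * larger than RB_MAX_SMALL_DATA, so no extra length word is added; the
 * 4 byte header brings it to 10, and ALIGN(10, 4) reserves 12 bytes total.
 */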
2039
2040 static inline void
2041 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2042 struct buffer_page *tail_page,
2043 unsigned long tail, unsigned long length)
2044 {
2045 struct ring_buffer_event *event;
2046
2047 /*
2048 * Only the event that crossed the page boundary
2049 * must fill the old tail_page with padding.
2050 */
2051 if (tail >= BUF_PAGE_SIZE) {
2052 /*
2053 * If the page was filled, then we still need
2054 * to update the real_end. Reset it to zero
2055 * and the reader will ignore it.
2056 */
2057 if (tail == BUF_PAGE_SIZE)
2058 tail_page->real_end = 0;
2059
2060 local_sub(length, &tail_page->write);
2061 return;
2062 }
2063
2064 event = __rb_page_index(tail_page, tail);
2065 kmemcheck_annotate_bitfield(event, bitfield);
2066
2067 /* account for padding bytes */
2068 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2069
2070 /*
2071 * Save the original length to the meta data.
2072 * This will be used by the reader to update the lost
2073 * event counter.
2074 */
2075 tail_page->real_end = tail;
2076
2077 /*
2078 * If this event is bigger than the minimum size, then
2079 * we need to be careful that we don't subtract the
2080 * write counter enough to allow another writer to slip
2081 * in on this page.
2082 * We put in a discarded commit instead, to make sure
2083 * that this space is not used again.
2084 *
2085 * If we are less than the minimum size, we don't need to
2086 * worry about it.
2087 */
2088 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2089 /* No room for any events */
2090
2091 /* Mark the rest of the page with padding */
2092 rb_event_set_padding(event);
2093
2094 /* Set the write back to the previous setting */
2095 local_sub(length, &tail_page->write);
2096 return;
2097 }
2098
2099 /* Put in a discarded event */
2100 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2101 event->type_len = RINGBUF_TYPE_PADDING;
2102 /* time delta must be non zero */
2103 event->time_delta = 1;
2104
2105 /* Set write to end of buffer */
2106 length = (tail + length) - BUF_PAGE_SIZE;
2107 local_sub(length, &tail_page->write);
2108 }
2109
2110 /*
2111 * This is the slow path, force gcc not to inline it.
2112 */
2113 static noinline struct ring_buffer_event *
2114 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2115 unsigned long length, unsigned long tail,
2116 struct buffer_page *tail_page, u64 ts)
2117 {
2118 struct buffer_page *commit_page = cpu_buffer->commit_page;
2119 struct ring_buffer *buffer = cpu_buffer->buffer;
2120 struct buffer_page *next_page;
2121 int ret;
2122
2123 next_page = tail_page;
2124
2125 rb_inc_page(cpu_buffer, &next_page);
2126
2127 /*
2128 * If for some reason, we had an interrupt storm that made
2129 * it all the way around the buffer, bail, and warn
2130 * about it.
2131 */
2132 if (unlikely(next_page == commit_page)) {
2133 local_inc(&cpu_buffer->commit_overrun);
2134 goto out_reset;
2135 }
2136
2137 /*
2138 * This is where the fun begins!
2139 *
2140 * We are fighting against races between a reader that
2141 * could be on another CPU trying to swap its reader
2142 * page with the buffer head.
2143 *
2144 * We are also fighting against interrupts coming in and
2145 * moving the head or tail on us as well.
2146 *
2147 * If the next page is the head page then we have filled
2148 * the buffer, unless the commit page is still on the
2149 * reader page.
2150 */
2151 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2152
2153 /*
2154 * If the commit is not on the reader page, then
2155 * move the header page.
2156 */
2157 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2158 /*
2159 * If we are not in overwrite mode,
2160 * this is easy, just stop here.
2161 */
2162 if (!(buffer->flags & RB_FL_OVERWRITE)) {
2163 local_inc(&cpu_buffer->dropped_events);
2164 goto out_reset;
2165 }
2166
2167 ret = rb_handle_head_page(cpu_buffer,
2168 tail_page,
2169 next_page);
2170 if (ret < 0)
2171 goto out_reset;
2172 if (ret)
2173 goto out_again;
2174 } else {
2175 /*
2176 * We need to be careful here too. The
2177 * commit page could still be on the reader
2178 * page. We could have a small buffer, and
2179 * have filled up the buffer with events
2180 * from interrupts and such, and wrapped.
2181 *
2182 * Note, if the tail page is also on the
2183 * reader_page, we let it move out.
2184 */
2185 if (unlikely((cpu_buffer->commit_page !=
2186 cpu_buffer->tail_page) &&
2187 (cpu_buffer->commit_page ==
2188 cpu_buffer->reader_page))) {
2189 local_inc(&cpu_buffer->commit_overrun);
2190 goto out_reset;
2191 }
2192 }
2193 }
2194
2195 ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
2196 if (ret) {
2197 /*
2198 * Nested commits always have zero deltas, so
2199 * just reread the time stamp
2200 */
2201 ts = rb_time_stamp(buffer);
2202 next_page->page->time_stamp = ts;
2203 }
2204
2205 out_again:
2206
2207 rb_reset_tail(cpu_buffer, tail_page, tail, length);
2208
2209 /* fail and let the caller try again */
2210 return ERR_PTR(-EAGAIN);
2211
2212 out_reset:
2213 /* reset write */
2214 rb_reset_tail(cpu_buffer, tail_page, tail, length);
2215
2216 return NULL;
2217 }
2218
2219 static struct ring_buffer_event *
2220 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2221 unsigned long length, u64 ts,
2222 u64 delta, int add_timestamp)
2223 {
2224 struct buffer_page *tail_page;
2225 struct ring_buffer_event *event;
2226 unsigned long tail, write;
2227
2228 /*
2229 * If the time delta since the last event is too big to
2230 * hold in the time field of the event, then we append a
2231 * TIME EXTEND event ahead of the data event.
2232 */
2233 if (unlikely(add_timestamp))
2234 length += RB_LEN_TIME_EXTEND;
2235
2236 tail_page = cpu_buffer->tail_page;
2237 write = local_add_return(length, &tail_page->write);
2238
2239 /* set write to only the index of the write */
2240 write &= RB_WRITE_MASK;
2241 tail = write - length;
2242
2243 /* See if we shot past the end of this buffer page */
2244 if (unlikely(write > BUF_PAGE_SIZE))
2245 return rb_move_tail(cpu_buffer, length, tail,
2246 tail_page, ts);
2247
2248 /* We reserved something on the buffer */
2249
2250 event = __rb_page_index(tail_page, tail);
2251 kmemcheck_annotate_bitfield(event, bitfield);
2252 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
2253
2254 local_inc(&tail_page->entries);
2255
2256 /*
2257 * If this is the first commit on the page, then update
2258 * its timestamp.
2259 */
2260 if (!tail)
2261 tail_page->page->time_stamp = ts;
2262
2263 /* account for these added bytes */
2264 local_add(length, &cpu_buffer->entries_bytes);
2265
2266 return event;
2267 }
2268
2269 static inline int
2270 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2271 struct ring_buffer_event *event)
2272 {
2273 unsigned long new_index, old_index;
2274 struct buffer_page *bpage;
2275 unsigned long index;
2276 unsigned long addr;
2277
2278 new_index = rb_event_index(event);
2279 old_index = new_index + rb_event_ts_length(event);
2280 addr = (unsigned long)event;
2281 addr &= PAGE_MASK;
2282
2283 bpage = cpu_buffer->tail_page;
2284
2285 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2286 unsigned long write_mask =
2287 local_read(&bpage->write) & ~RB_WRITE_MASK;
2288 unsigned long event_length = rb_event_length(event);
2289 /*
2290 * This is on the tail page. It is possible that
2291 * a write could come in and move the tail page
2292 * and write to the next page. That is fine
2293 * because we just shorten what is on this page.
2294 */
2295 old_index += write_mask;
2296 new_index += write_mask;
2297 index = local_cmpxchg(&bpage->write, old_index, new_index);
2298 if (index == old_index) {
2299 /* update counters */
2300 local_sub(event_length, &cpu_buffer->entries_bytes);
2301 return 1;
2302 }
2303 }
2304
2305 /* could not discard */
2306 return 0;
2307 }
2308
2309 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2310 {
2311 local_inc(&cpu_buffer->committing);
2312 local_inc(&cpu_buffer->commits);
2313 }
2314
2315 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2316 {
2317 unsigned long commits;
2318
2319 if (RB_WARN_ON(cpu_buffer,
2320 !local_read(&cpu_buffer->committing)))
2321 return;
2322
2323 again:
2324 commits = local_read(&cpu_buffer->commits);
2325 /* synchronize with interrupts */
2326 barrier();
2327 if (local_read(&cpu_buffer->committing) == 1)
2328 rb_set_commit_to_write(cpu_buffer);
2329
2330 local_dec(&cpu_buffer->committing);
2331
2332 /* synchronize with interrupts */
2333 barrier();
2334
2335 /*
2336 * Need to account for interrupts coming in between the
2337 * updating of the commit page and the clearing of the
2338 * committing counter.
2339 */
2340 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2341 !local_read(&cpu_buffer->committing)) {
2342 local_inc(&cpu_buffer->committing);
2343 goto again;
2344 }
2345 }
2346
2347 static struct ring_buffer_event *
2348 rb_reserve_next_event(struct ring_buffer *buffer,
2349 struct ring_buffer_per_cpu *cpu_buffer,
2350 unsigned long length)
2351 {
2352 struct ring_buffer_event *event;
2353 u64 ts, delta;
2354 int nr_loops = 0;
2355 int add_timestamp;
2356 u64 diff;
2357
2358 rb_start_commit(cpu_buffer);
2359
2360 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2361 /*
2362 * Due to the ability to swap a cpu buffer from a buffer
2363 * it is possible it was swapped before we committed.
2364 * (committing stops a swap). We check for it here and
2365 * if it happened, we have to fail the write.
2366 */
2367 barrier();
2368 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2369 local_dec(&cpu_buffer->committing);
2370 local_dec(&cpu_buffer->commits);
2371 return NULL;
2372 }
2373 #endif
2374
2375 length = rb_calculate_event_length(length);
2376 again:
2377 add_timestamp = 0;
2378 delta = 0;
2379
2380 /*
2381 * We allow for interrupts to reenter here and do a trace.
2382 * If one does, it will cause this original code to loop
2383 * back here. Even with heavy interrupts happening, this
2384 * should only happen a few times in a row. If this happens
2385 * 1000 times in a row, there must be either an interrupt
2386 * storm or we have something buggy.
2387 * Bail!
2388 */
2389 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2390 goto out_fail;
2391
2392 ts = rb_time_stamp(cpu_buffer->buffer);
2393 diff = ts - cpu_buffer->write_stamp;
2394
2395 /* make sure this diff is calculated here */
2396 barrier();
2397
2398 /* Did the write stamp get updated already? */
2399 if (likely(ts >= cpu_buffer->write_stamp)) {
2400 delta = diff;
2401 if (unlikely(test_time_stamp(delta))) {
2402 int local_clock_stable = 1;
2403 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2404 local_clock_stable = sched_clock_stable;
2405 #endif
2406 WARN_ONCE(delta > (1ULL << 59),
2407 KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2408 (unsigned long long)delta,
2409 (unsigned long long)ts,
2410 (unsigned long long)cpu_buffer->write_stamp,
2411 local_clock_stable ? "" :
2412 "If you just came from a suspend/resume,\n"
2413 "please switch to the trace global clock:\n"
2414 " echo global > /sys/kernel/debug/tracing/trace_clock\n");
2415 add_timestamp = 1;
2416 }
2417 }
2418
2419 event = __rb_reserve_next(cpu_buffer, length, ts,
2420 delta, add_timestamp);
2421 if (unlikely(PTR_ERR(event) == -EAGAIN))
2422 goto again;
2423
2424 if (!event)
2425 goto out_fail;
2426
2427 return event;
2428
2429 out_fail:
2430 rb_end_commit(cpu_buffer);
2431 return NULL;
2432 }
2433
2434 #ifdef CONFIG_TRACING
2435
2436 /*
2437 * The lock and unlock are done within a preempt disable section.
2438 * The current_context per_cpu variable can only be modified
2439 * by the current task between lock and unlock. But it can
2440 * be modified more than once via an interrupt. To pass this
2441 * information from the lock to the unlock without having to
2442 * access the 'in_interrupt()' functions again (which do show
2443 * a bit of overhead in something as critical as function tracing,
2444 * we use a bitmask trick.
2445 *
2446 * bit 0 = NMI context
2447 * bit 1 = IRQ context
2448 * bit 2 = SoftIRQ context
2449 * bit 3 = normal context.
2450 *
2451 * This works because this is the order of contexts that can
2452 * preempt other contexts. A SoftIRQ never preempts an IRQ
2453 * context.
2454 *
2455 * When the context is determined, the corresponding bit is
2456 * checked and set (if it was set, then a recursion of that context
2457 * happened).
2458 *
2459 * On unlock, we need to clear this bit. To do so, just subtract
2460 * 1 from the current_context and AND it to itself.
2461 *
2462 * (binary)
2463 * 101 - 1 = 100
2464 * 101 & 100 = 100 (clearing bit zero)
2465 *
2466 * 1010 - 1 = 1001
2467 * 1010 & 1001 = 1000 (clearing bit 1)
2468 *
2469 * The least significant bit can be cleared this way, and it
2470 * just so happens that it is the same bit corresponding to
2471 * the current context.
2472 */
2473 static DEFINE_PER_CPU(unsigned int, current_context);
2474
2475 static __always_inline int trace_recursive_lock(void)
2476 {
2477 unsigned int val = this_cpu_read(current_context);
2478 int bit;
2479
2480 if (in_interrupt()) {
2481 if (in_nmi())
2482 bit = 0;
2483 else if (in_irq())
2484 bit = 1;
2485 else
2486 bit = 2;
2487 } else
2488 bit = 3;
2489
2490 if (unlikely(val & (1 << bit)))
2491 return 1;
2492
2493 val |= (1 << bit);
2494 this_cpu_write(current_context, val);
2495
2496 return 0;
2497 }
2498
2499 static __always_inline void trace_recursive_unlock(void)
2500 {
2501 unsigned int val = this_cpu_read(current_context);
2502
2503 val--;
2504 val &= this_cpu_read(current_context);
2505 this_cpu_write(current_context, val);
2506 }
2507
2508 #else
2509
2510 #define trace_recursive_lock() (0)
2511 #define trace_recursive_unlock() do { } while (0)
2512
2513 #endif
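
/*
 * A small stand-alone sketch of the "clear the lowest set bit" step that
 * trace_recursive_unlock() relies on; the helper name is hypothetical and
 * only illustrates the arithmetic described in the comment above.
 */
static inline unsigned int example_clear_lowest_bit(unsigned int val)
{
	/* e.g. 0b1010 -> 0b1000, 0b0101 -> 0b0100 */
	return (val - 1) & val;
}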
2514
2515 /**
2516 * ring_buffer_lock_reserve - reserve a part of the buffer
2517 * @buffer: the ring buffer to reserve from
2518 * @length: the length of the data to reserve (excluding event header)
2519 *
2520 * Returns a reserved event on the ring buffer to copy directly to.
2521 * The user of this interface will need to get the body to write into
2522 * and can use the ring_buffer_event_data() interface.
2523 *
2524 * The length is the length of the data needed, not the event length
2525 * which also includes the event header.
2526 *
2527 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2528 * If NULL is returned, then nothing has been allocated or locked.
2529 */
2530 struct ring_buffer_event *
2531 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2532 {
2533 struct ring_buffer_per_cpu *cpu_buffer;
2534 struct ring_buffer_event *event;
2535 int cpu;
2536
2537 if (ring_buffer_flags != RB_BUFFERS_ON)
2538 return NULL;
2539
2540 /* If we are tracing schedule, we don't want to recurse */
2541 preempt_disable_notrace();
2542
2543 if (atomic_read(&buffer->record_disabled))
2544 goto out_nocheck;
2545
2546 if (trace_recursive_lock())
2547 goto out_nocheck;
2548
2549 cpu = raw_smp_processor_id();
2550
2551 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2552 goto out;
2553
2554 cpu_buffer = buffer->buffers[cpu];
2555
2556 if (atomic_read(&cpu_buffer->record_disabled))
2557 goto out;
2558
2559 if (length > BUF_MAX_DATA_SIZE)
2560 goto out;
2561
2562 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2563 if (!event)
2564 goto out;
2565
2566 return event;
2567
2568 out:
2569 trace_recursive_unlock();
2570
2571 out_nocheck:
2572 preempt_enable_notrace();
2573 return NULL;
2574 }
2575 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
2576
2577 static void
2578 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2579 struct ring_buffer_event *event)
2580 {
2581 u64 delta;
2582
2583 /*
2584 * The event first in the commit queue updates the
2585 * time stamp.
2586 */
2587 if (rb_event_is_commit(cpu_buffer, event)) {
2588 /*
2589 * A commit event that is first on a page
2590 * updates the write timestamp with the page stamp
2591 */
2592 if (!rb_event_index(event))
2593 cpu_buffer->write_stamp =
2594 cpu_buffer->commit_page->page->time_stamp;
2595 else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2596 delta = event->array[0];
2597 delta <<= TS_SHIFT;
2598 delta += event->time_delta;
2599 cpu_buffer->write_stamp += delta;
2600 } else
2601 cpu_buffer->write_stamp += event->time_delta;
2602 }
2603 }
2604
2605 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2606 struct ring_buffer_event *event)
2607 {
2608 local_inc(&cpu_buffer->entries);
2609 rb_update_write_stamp(cpu_buffer, event);
2610 rb_end_commit(cpu_buffer);
2611 }
2612
2613 /**
2614 * ring_buffer_unlock_commit - commit a reserved event
2615 * @buffer: The buffer to commit to
2616 * @event: The event pointer to commit.
2617 *
2618 * This commits the data to the ring buffer, and releases any locks held.
2619 *
2620 * Must be paired with ring_buffer_lock_reserve.
2621 */
2622 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2623 struct ring_buffer_event *event)
2624 {
2625 struct ring_buffer_per_cpu *cpu_buffer;
2626 int cpu = raw_smp_processor_id();
2627
2628 cpu_buffer = buffer->buffers[cpu];
2629
2630 rb_commit(cpu_buffer, event);
2631
2632 trace_recursive_unlock();
2633
2634 preempt_enable_notrace();
2635
2636 return 0;
2637 }
2638 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
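
/*
 * An illustrative sketch of the usual reserve/commit pairing; the function
 * name and struct my_payload are hypothetical, while the ring buffer calls
 * are the ones documented above plus ring_buffer_event_data() from
 * <linux/ring_buffer.h>.
 */
struct my_payload {
	u32	id;
	u64	value;
};

static int example_write_payload(struct ring_buffer *buffer, u32 id, u64 value)
{
	struct ring_buffer_event *event;
	struct my_payload *p;

	/* Reserve room for the data only; the event header is added for us. */
	event = ring_buffer_lock_reserve(buffer, sizeof(*p));
	if (!event)
		return -EBUSY;

	/* Fill in the body, then commit (which also releases preemption). */
	p = ring_buffer_event_data(event);
	p->id = id;
	p->value = value;

	return ring_buffer_unlock_commit(buffer, event);
}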
2639
2640 static inline void rb_event_discard(struct ring_buffer_event *event)
2641 {
2642 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2643 event = skip_time_extend(event);
2644
2645 /* array[0] holds the actual length for the discarded event */
2646 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2647 event->type_len = RINGBUF_TYPE_PADDING;
2648 /* time delta must be non zero */
2649 if (!event->time_delta)
2650 event->time_delta = 1;
2651 }
2652
2653 /*
2654 * Decrement the entries to the page that an event is on.
2655 * The event does not even need to exist, only the pointer
2656 * to the page it is on. This may only be called before the commit
2657 * takes place.
2658 */
2659 static inline void
2660 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2661 struct ring_buffer_event *event)
2662 {
2663 unsigned long addr = (unsigned long)event;
2664 struct buffer_page *bpage = cpu_buffer->commit_page;
2665 struct buffer_page *start;
2666
2667 addr &= PAGE_MASK;
2668
2669 /* Do the likely case first */
2670 if (likely(bpage->page == (void *)addr)) {
2671 local_dec(&bpage->entries);
2672 return;
2673 }
2674
2675 /*
2676 * Because the commit page may be on the reader page we
2677 * start with the next page and check the end loop there.
2678 */
2679 rb_inc_page(cpu_buffer, &bpage);
2680 start = bpage;
2681 do {
2682 if (bpage->page == (void *)addr) {
2683 local_dec(&bpage->entries);
2684 return;
2685 }
2686 rb_inc_page(cpu_buffer, &bpage);
2687 } while (bpage != start);
2688
2689 /* commit not part of this buffer?? */
2690 RB_WARN_ON(cpu_buffer, 1);
2691 }
2692
2693 /**
2694 * ring_buffer_commit_discard - discard an event that has not been committed
2695 * @buffer: the ring buffer
2696 * @event: non committed event to discard
2697 *
2698 * Sometimes an event that is in the ring buffer needs to be ignored.
2699 * This function lets the user discard an event in the ring buffer
2700 * and then that event will not be read later.
2701 *
2702 * This function only works if it is called before the item has been
2703 * committed. It will try to free the event from the ring buffer
2704 * if another event has not been added behind it.
2705 *
2706 * If another event has been added behind it, it will set the event
2707 * up as discarded, and perform the commit.
2708 *
2709 * If this function is called, do not call ring_buffer_unlock_commit on
2710 * the event.
2711 */
2712 void ring_buffer_discard_commit(struct ring_buffer *buffer,
2713 struct ring_buffer_event *event)
2714 {
2715 struct ring_buffer_per_cpu *cpu_buffer;
2716 int cpu;
2717
2718 /* The event is discarded regardless */
2719 rb_event_discard(event);
2720
2721 cpu = smp_processor_id();
2722 cpu_buffer = buffer->buffers[cpu];
2723
2724 /*
2725 * This must only be called if the event has not been
2726 * committed yet. Thus we can assume that preemption
2727 * is still disabled.
2728 */
2729 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2730
2731 rb_decrement_entry(cpu_buffer, event);
2732 if (rb_try_to_discard(cpu_buffer, event))
2733 goto out;
2734
2735 /*
2736 * The commit is still visible by the reader, so we
2737 * must still update the timestamp.
2738 */
2739 rb_update_write_stamp(cpu_buffer, event);
2740 out:
2741 rb_end_commit(cpu_buffer);
2742
2743 trace_recursive_unlock();
2744
2745 preempt_enable_notrace();
2746
2747 }
2748 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
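
/*
 * An illustrative sketch of the discard path: reserve, fill in the data,
 * then either commit or discard depending on a filter decision. The
 * function name and the keep() callback are hypothetical.
 */
static int example_write_filtered(struct ring_buffer *buffer, void *data,
				  unsigned long len, bool (*keep)(void *data))
{
	struct ring_buffer_event *event;
	void *body;

	event = ring_buffer_lock_reserve(buffer, len);
	if (!event)
		return -EBUSY;

	body = ring_buffer_event_data(event);
	memcpy(body, data, len);

	if (!keep(body)) {
		/* Replaces the commit; do not also call unlock_commit. */
		ring_buffer_discard_commit(buffer, event);
		return 0;
	}

	return ring_buffer_unlock_commit(buffer, event);
}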
2749
2750 /**
2751 * ring_buffer_write - write data to the buffer without reserving
2752 * @buffer: The ring buffer to write to.
2753 * @length: The length of the data being written (excluding the event header)
2754 * @data: The data to write to the buffer.
2755 *
2756 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2757 * one function. If you already have the data to write to the buffer, it
2758 * may be easier to simply call this function.
2759 *
2760 * Note, like ring_buffer_lock_reserve, the length is the length of the data
2761 * and not the length of the event which would hold the header.
2762 */
2763 int ring_buffer_write(struct ring_buffer *buffer,
2764 unsigned long length,
2765 void *data)
2766 {
2767 struct ring_buffer_per_cpu *cpu_buffer;
2768 struct ring_buffer_event *event;
2769 void *body;
2770 int ret = -EBUSY;
2771 int cpu;
2772
2773 if (ring_buffer_flags != RB_BUFFERS_ON)
2774 return -EBUSY;
2775
2776 preempt_disable_notrace();
2777
2778 if (atomic_read(&buffer->record_disabled))
2779 goto out;
2780
2781 cpu = raw_smp_processor_id();
2782
2783 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2784 goto out;
2785
2786 cpu_buffer = buffer->buffers[cpu];
2787
2788 if (atomic_read(&cpu_buffer->record_disabled))
2789 goto out;
2790
2791 if (length > BUF_MAX_DATA_SIZE)
2792 goto out;
2793
2794 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2795 if (!event)
2796 goto out;
2797
2798 body = rb_event_data(event);
2799
2800 memcpy(body, data, length);
2801
2802 rb_commit(cpu_buffer, event);
2803
2804 ret = 0;
2805 out:
2806 preempt_enable_notrace();
2807
2808 return ret;
2809 }
2810 EXPORT_SYMBOL_GPL(ring_buffer_write);
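
/*
 * An illustrative sketch: when the data already sits in a local buffer,
 * ring_buffer_write() performs the reserve, copy and commit in one call.
 * The wrapper name is hypothetical.
 */
static int example_log_blob(struct ring_buffer *buffer, void *blob,
			    unsigned long len)
{
	/* Returns 0 on success, -EBUSY if the record could not be written. */
	return ring_buffer_write(buffer, len, blob);
}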
2811
2812 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
2813 {
2814 struct buffer_page *reader = cpu_buffer->reader_page;
2815 struct buffer_page *head = rb_set_head_page(cpu_buffer);
2816 struct buffer_page *commit = cpu_buffer->commit_page;
2817
2818 /* In case of error, head will be NULL */
2819 if (unlikely(!head))
2820 return 1;
2821
2822 return reader->read == rb_page_commit(reader) &&
2823 (commit == reader ||
2824 (commit == head &&
2825 head->read == rb_page_commit(commit)));
2826 }
2827
2828 /**
2829 * ring_buffer_record_disable - stop all writes into the buffer
2830 * @buffer: The ring buffer to stop writes to.
2831 *
2832 * This prevents all writes to the buffer. Any attempt to write
2833 * to the buffer after this will fail and return NULL.
2834 *
2835 * The caller should call synchronize_sched() after this.
2836 */
2837 void ring_buffer_record_disable(struct ring_buffer *buffer)
2838 {
2839 atomic_inc(&buffer->record_disabled);
2840 }
2841 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
2842
2843 /**
2844 * ring_buffer_record_enable - enable writes to the buffer
2845 * @buffer: The ring buffer to enable writes
2846 *
2847 * Note, multiple disables will need the same number of enables
2848 * to truly enable the writing (much like preempt_disable).
2849 */
2850 void ring_buffer_record_enable(struct ring_buffer *buffer)
2851 {
2852 atomic_dec(&buffer->record_disabled);
2853 }
2854 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
2855
2856 /**
2857 * ring_buffer_record_off - stop all writes into the buffer
2858 * @buffer: The ring buffer to stop writes to.
2859 *
2860 * This prevents all writes to the buffer. Any attempt to write
2861 * to the buffer after this will fail and return NULL.
2862 *
2863 * This is different than ring_buffer_record_disable() as
2864 * it works like an on/off switch, whereas the disable() version
2865 * must be paired with an enable().
2866 */
2867 void ring_buffer_record_off(struct ring_buffer *buffer)
2868 {
2869 unsigned int rd;
2870 unsigned int new_rd;
2871
2872 do {
2873 rd = atomic_read(&buffer->record_disabled);
2874 new_rd = rd | RB_BUFFER_OFF;
2875 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
2876 }
2877 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
2878
2879 /**
2880 * ring_buffer_record_on - restart writes into the buffer
2881 * @buffer: The ring buffer to start writes to.
2882 *
2883 * This enables all writes to the buffer that was disabled by
2884 * ring_buffer_record_off().
2885 *
2886 * This is different than ring_buffer_record_enable() as
2887 * it works like an on/off switch, whereas the enable() version
2888 * must be paired with a disable().
2889 */
2890 void ring_buffer_record_on(struct ring_buffer *buffer)
2891 {
2892 unsigned int rd;
2893 unsigned int new_rd;
2894
2895 do {
2896 rd = atomic_read(&buffer->record_disabled);
2897 new_rd = rd & ~RB_BUFFER_OFF;
2898 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
2899 }
2900 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
2901
2902 /**
2903 * ring_buffer_record_is_on - return true if the ring buffer can write
2904 * @buffer: The ring buffer to see if write is enabled
2905 *
2906 * Returns true if the ring buffer is in a state that it accepts writes.
2907 */
2908 int ring_buffer_record_is_on(struct ring_buffer *buffer)
2909 {
2910 return !atomic_read(&buffer->record_disabled);
2911 }
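
/*
 * An illustrative sketch contrasting the on/off switch with the nested
 * disable/enable pair described above; the function name is hypothetical.
 */
static void example_pause_recording(struct ring_buffer *buffer)
{
	/* Hard switch: a single _on() re-enables, regardless of nesting. */
	ring_buffer_record_off(buffer);
	if (!ring_buffer_record_is_on(buffer))
		pr_debug("ring buffer writes are off\n");
	ring_buffer_record_on(buffer);

	/* Nested counter: every disable must be matched by an enable. */
	ring_buffer_record_disable(buffer);
	ring_buffer_record_disable(buffer);
	ring_buffer_record_enable(buffer);
	ring_buffer_record_enable(buffer);	/* writes allowed again here */
}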
2912
2913 /**
2914 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
2915 * @buffer: The ring buffer to stop writes to.
2916 * @cpu: The CPU buffer to stop
2917 *
2918 * This prevents all writes to the buffer. Any attempt to write
2919 * to the buffer after this will fail and return NULL.
2920 *
2921 * The caller should call synchronize_sched() after this.
2922 */
2923 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
2924 {
2925 struct ring_buffer_per_cpu *cpu_buffer;
2926
2927 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2928 return;
2929
2930 cpu_buffer = buffer->buffers[cpu];
2931 atomic_inc(&cpu_buffer->record_disabled);
2932 }
2933 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
2934
2935 /**
2936 * ring_buffer_record_enable_cpu - enable writes to the buffer
2937 * @buffer: The ring buffer to enable writes
2938 * @cpu: The CPU to enable.
2939 *
2940 * Note, multiple disables will need the same number of enables
2941 * to truly enable the writing (much like preempt_disable).
2942 */
2943 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
2944 {
2945 struct ring_buffer_per_cpu *cpu_buffer;
2946
2947 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2948 return;
2949
2950 cpu_buffer = buffer->buffers[cpu];
2951 atomic_dec(&cpu_buffer->record_disabled);
2952 }
2953 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
2954
2955 /*
2956 * The total entries in the ring buffer is the running counter
2957 * of entries entered into the ring buffer, minus the sum of
2958 * the entries read from the ring buffer and the number of
2959 * entries that were overwritten.
2960 */
2961 static inline unsigned long
2962 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
2963 {
2964 return local_read(&cpu_buffer->entries) -
2965 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
2966 }
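
/*
 * Worked example for the formula above (a sketch): if 1000 events have been
 * written, 200 were overwritten and 300 have been read, then
 * 1000 - (200 + 300) = 500 entries remain in the buffer.
 */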
2967
2968 /**
2969 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
2970 * @buffer: The ring buffer
2971 * @cpu: The per CPU buffer to read from.
2972 */
2973 u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
2974 {
2975 unsigned long flags;
2976 struct ring_buffer_per_cpu *cpu_buffer;
2977 struct buffer_page *bpage;
2978 u64 ret = 0;
2979
2980 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2981 return 0;
2982
2983 cpu_buffer = buffer->buffers[cpu];
2984 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2985 /*
2986 * if the tail is on reader_page, oldest time stamp is on the reader
2987 * page
2988 */
2989 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
2990 bpage = cpu_buffer->reader_page;
2991 else
2992 bpage = rb_set_head_page(cpu_buffer);
2993 if (bpage)
2994 ret = bpage->page->time_stamp;
2995 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2996
2997 return ret;
2998 }
2999 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3000
3001 /**
3002 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3003 * @buffer: The ring buffer
3004 * @cpu: The per CPU buffer to read from.
3005 */
3006 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3007 {
3008 struct ring_buffer_per_cpu *cpu_buffer;
3009 unsigned long ret;
3010
3011 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3012 return 0;
3013
3014 cpu_buffer = buffer->buffers[cpu];
3015 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3016
3017 return ret;
3018 }
3019 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3020
3021 /**
3022 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
3023 * @buffer: The ring buffer
3024 * @cpu: The per CPU buffer to get the entries from.
3025 */
3026 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
3027 {
3028 struct ring_buffer_per_cpu *cpu_buffer;
3029
3030 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3031 return 0;
3032
3033 cpu_buffer = buffer->buffers[cpu];
3034
3035 return rb_num_of_entries(cpu_buffer);
3036 }
3037 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3038
3039 /**
3040 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3041 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3042 * @buffer: The ring buffer
3043 * @cpu: The per CPU buffer to get the number of overruns from
3044 */
3045 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3046 {
3047 struct ring_buffer_per_cpu *cpu_buffer;
3048 unsigned long ret;
3049
3050 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3051 return 0;
3052
3053 cpu_buffer = buffer->buffers[cpu];
3054 ret = local_read(&cpu_buffer->overrun);
3055
3056 return ret;
3057 }
3058 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3059
3060 /**
3061 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3062 * commits failing due to the buffer wrapping around while there are uncommitted
3063 * events, such as during an interrupt storm.
3064 * @buffer: The ring buffer
3065 * @cpu: The per CPU buffer to get the number of overruns from
3066 */
3067 unsigned long
3068 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3069 {
3070 struct ring_buffer_per_cpu *cpu_buffer;
3071 unsigned long ret;
3072
3073 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3074 return 0;
3075
3076 cpu_buffer = buffer->buffers[cpu];
3077 ret = local_read(&cpu_buffer->commit_overrun);
3078
3079 return ret;
3080 }
3081 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3082
3083 /**
3084 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3085 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3086 * @buffer: The ring buffer
3087 * @cpu: The per CPU buffer to get the number of overruns from
3088 */
3089 unsigned long
3090 ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3091 {
3092 struct ring_buffer_per_cpu *cpu_buffer;
3093 unsigned long ret;
3094
3095 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3096 return 0;
3097
3098 cpu_buffer = buffer->buffers[cpu];
3099 ret = local_read(&cpu_buffer->dropped_events);
3100
3101 return ret;
3102 }
3103 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3104
3105 /**
3106 * ring_buffer_read_events_cpu - get the number of events successfully read
3107 * @buffer: The ring buffer
3108 * @cpu: The per CPU buffer to get the number of events read
3109 */
3110 unsigned long
3111 ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3112 {
3113 struct ring_buffer_per_cpu *cpu_buffer;
3114
3115 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3116 return 0;
3117
3118 cpu_buffer = buffer->buffers[cpu];
3119 return cpu_buffer->read;
3120 }
3121 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3122
3123 /**
3124 * ring_buffer_entries - get the number of entries in a buffer
3125 * @buffer: The ring buffer
3126 *
3127 * Returns the total number of entries in the ring buffer
3128 * (all CPU entries)
3129 */
3130 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3131 {
3132 struct ring_buffer_per_cpu *cpu_buffer;
3133 unsigned long entries = 0;
3134 int cpu;
3135
3136 /* if you care about this being correct, lock the buffer */
3137 for_each_buffer_cpu(buffer, cpu) {
3138 cpu_buffer = buffer->buffers[cpu];
3139 entries += rb_num_of_entries(cpu_buffer);
3140 }
3141
3142 return entries;
3143 }
3144 EXPORT_SYMBOL_GPL(ring_buffer_entries);
3145
3146 /**
3147 * ring_buffer_overruns - get the number of overruns in buffer
3148 * @buffer: The ring buffer
3149 *
3150 * Returns the total number of overruns in the ring buffer
3151 * (all CPU entries)
3152 */
3153 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3154 {
3155 struct ring_buffer_per_cpu *cpu_buffer;
3156 unsigned long overruns = 0;
3157 int cpu;
3158
3159 /* if you care about this being correct, lock the buffer */
3160 for_each_buffer_cpu(buffer, cpu) {
3161 cpu_buffer = buffer->buffers[cpu];
3162 overruns += local_read(&cpu_buffer->overrun);
3163 }
3164
3165 return overruns;
3166 }
3167 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
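
/*
 * An illustrative sketch dumping the per cpu statistics exposed above.
 * Iterating the online CPUs is an assumption; a real caller would walk the
 * cpumask the buffer was created with. Names are hypothetical.
 */
static void example_dump_stats(struct ring_buffer *buffer)
{
	int cpu;

	for_each_online_cpu(cpu) {
		pr_info("cpu%d: %lu entries, %lu overruns, %lu bytes\n",
			cpu,
			ring_buffer_entries_cpu(buffer, cpu),
			ring_buffer_overrun_cpu(buffer, cpu),
			ring_buffer_bytes_cpu(buffer, cpu));
	}
}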
3168
3169 static void rb_iter_reset(struct ring_buffer_iter *iter)
3170 {
3171 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3172
3173 /* Iterator usage is expected to have record disabled */
3174 if (list_empty(&cpu_buffer->reader_page->list)) {
3175 iter->head_page = rb_set_head_page(cpu_buffer);
3176 if (unlikely(!iter->head_page))
3177 return;
3178 iter->head = iter->head_page->read;
3179 } else {
3180 iter->head_page = cpu_buffer->reader_page;
3181 iter->head = cpu_buffer->reader_page->read;
3182 }
3183 if (iter->head)
3184 iter->read_stamp = cpu_buffer->read_stamp;
3185 else
3186 iter->read_stamp = iter->head_page->page->time_stamp;
3187 iter->cache_reader_page = cpu_buffer->reader_page;
3188 iter->cache_read = cpu_buffer->read;
3189 }
3190
3191 /**
3192 * ring_buffer_iter_reset - reset an iterator
3193 * @iter: The iterator to reset
3194 *
3195 * Resets the iterator, so that it will start from the beginning
3196 * again.
3197 */
3198 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3199 {
3200 struct ring_buffer_per_cpu *cpu_buffer;
3201 unsigned long flags;
3202
3203 if (!iter)
3204 return;
3205
3206 cpu_buffer = iter->cpu_buffer;
3207
3208 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3209 rb_iter_reset(iter);
3210 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3211 }
3212 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3213
3214 /**
3215 * ring_buffer_iter_empty - check if an iterator has no more to read
3216 * @iter: The iterator to check
3217 */
3218 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3219 {
3220 struct ring_buffer_per_cpu *cpu_buffer;
3221
3222 cpu_buffer = iter->cpu_buffer;
3223
3224 return iter->head_page == cpu_buffer->commit_page &&
3225 iter->head == rb_commit_index(cpu_buffer);
3226 }
3227 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3228
3229 static void
3230 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3231 struct ring_buffer_event *event)
3232 {
3233 u64 delta;
3234
3235 switch (event->type_len) {
3236 case RINGBUF_TYPE_PADDING:
3237 return;
3238
3239 case RINGBUF_TYPE_TIME_EXTEND:
3240 delta = event->array[0];
3241 delta <<= TS_SHIFT;
3242 delta += event->time_delta;
3243 cpu_buffer->read_stamp += delta;
3244 return;
3245
3246 case RINGBUF_TYPE_TIME_STAMP:
3247 /* FIXME: not implemented */
3248 return;
3249
3250 case RINGBUF_TYPE_DATA:
3251 cpu_buffer->read_stamp += event->time_delta;
3252 return;
3253
3254 default:
3255 BUG();
3256 }
3257 return;
3258 }
3259
3260 static void
3261 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3262 struct ring_buffer_event *event)
3263 {
3264 u64 delta;
3265
3266 switch (event->type_len) {
3267 case RINGBUF_TYPE_PADDING:
3268 return;
3269
3270 case RINGBUF_TYPE_TIME_EXTEND:
3271 delta = event->array[0];
3272 delta <<= TS_SHIFT;
3273 delta += event->time_delta;
3274 iter->read_stamp += delta;
3275 return;
3276
3277 case RINGBUF_TYPE_TIME_STAMP:
3278 /* FIXME: not implemented */
3279 return;
3280
3281 case RINGBUF_TYPE_DATA:
3282 iter->read_stamp += event->time_delta;
3283 return;
3284
3285 default:
3286 BUG();
3287 }
3288 return;
3289 }
3290
3291 static struct buffer_page *
3292 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3293 {
3294 struct buffer_page *reader = NULL;
3295 unsigned long overwrite;
3296 unsigned long flags;
3297 int nr_loops = 0;
3298 int ret;
3299
3300 local_irq_save(flags);
3301 arch_spin_lock(&cpu_buffer->lock);
3302
3303 again:
3304 /*
3305 * This should normally only loop twice. But because the
3306 * start of the reader inserts an empty page, it causes
3307 * a case where we will loop three times. There should be no
3308 * reason to loop four times (that I know of).
3309 */
3310 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3311 reader = NULL;
3312 goto out;
3313 }
3314
3315 reader = cpu_buffer->reader_page;
3316
3317 /* If there's more to read, return this page */
3318 if (cpu_buffer->reader_page->read < rb_page_size(reader))
3319 goto out;
3320
3321 /* Never should we have an index greater than the size */
3322 if (RB_WARN_ON(cpu_buffer,
3323 cpu_buffer->reader_page->read > rb_page_size(reader)))
3324 goto out;
3325
3326 /* check if we caught up to the tail */
3327 reader = NULL;
3328 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3329 goto out;
3330
3331 /* Don't bother swapping if the ring buffer is empty */
3332 if (rb_num_of_entries(cpu_buffer) == 0)
3333 goto out;
3334
3335 /*
3336 * Reset the reader page to size zero.
3337 */
3338 local_set(&cpu_buffer->reader_page->write, 0);
3339 local_set(&cpu_buffer->reader_page->entries, 0);
3340 local_set(&cpu_buffer->reader_page->page->commit, 0);
3341 cpu_buffer->reader_page->real_end = 0;
3342
3343 spin:
3344 /*
3345 * Splice the empty reader page into the list around the head.
3346 */
3347 reader = rb_set_head_page(cpu_buffer);
3348 if (!reader)
3349 goto out;
3350 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3351 cpu_buffer->reader_page->list.prev = reader->list.prev;
3352
3353 /*
3354 * cpu_buffer->pages just needs to point to the buffer, it
3355 * has no specific buffer page to point to. Let's move it out
3356 * of our way so we don't accidentally swap it.
3357 */
3358 cpu_buffer->pages = reader->list.prev;
3359
3360 /* The reader page will be pointing to the new head */
3361 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3362
3363 /*
3364 * We want to make sure we read the overruns after we set up our
3365 * pointers to the next object. The writer side does a
3366 * cmpxchg to cross pages which acts as the mb on the writer
3367 * side. Note, the reader will constantly fail the swap
3368 * while the writer is updating the pointers, so this
3369 * guarantees that the overwrite recorded here is the one we
3370 * want to compare with the last_overrun.
3371 */
3372 smp_mb();
3373 overwrite = local_read(&(cpu_buffer->overrun));
3374
3375 /*
3376 * Here's the tricky part.
3377 *
3378 * We need to move the pointer past the header page.
3379 * But we can only do that if a writer is not currently
3380 * moving it. The page before the header page has the
3381 * flag bit '1' set if it is pointing to the page we want.
3382 * But if the writer is in the process of moving it
3383 * then it will be '2' or already moved '0'.
3384 */
3385
3386 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3387
3388 /*
3389 * If we did not convert it, then we must try again.
3390 */
3391 if (!ret)
3392 goto spin;
3393
3394 /*
3395 * Yeah! We succeeded in replacing the page.
3396 *
3397 * Now make the new head point back to the reader page.
3398 */
3399 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3400 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3401
3402 /* Finally update the reader page to the new head */
3403 cpu_buffer->reader_page = reader;
3404 rb_reset_reader_page(cpu_buffer);
3405
3406 if (overwrite != cpu_buffer->last_overrun) {
3407 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3408 cpu_buffer->last_overrun = overwrite;
3409 }
3410
3411 goto again;
3412
3413 out:
3414 arch_spin_unlock(&cpu_buffer->lock);
3415 local_irq_restore(flags);
3416
3417 return reader;
3418 }
3419
3420 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3421 {
3422 struct ring_buffer_event *event;
3423 struct buffer_page *reader;
3424 unsigned length;
3425
3426 reader = rb_get_reader_page(cpu_buffer);
3427
3428 /* This function should not be called when buffer is empty */
3429 if (RB_WARN_ON(cpu_buffer, !reader))
3430 return;
3431
3432 event = rb_reader_event(cpu_buffer);
3433
3434 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3435 cpu_buffer->read++;
3436
3437 rb_update_read_stamp(cpu_buffer, event);
3438
3439 length = rb_event_length(event);
3440 cpu_buffer->reader_page->read += length;
3441 }
3442
3443 static void rb_advance_iter(struct ring_buffer_iter *iter)
3444 {
3445 struct ring_buffer_per_cpu *cpu_buffer;
3446 struct ring_buffer_event *event;
3447 unsigned length;
3448
3449 cpu_buffer = iter->cpu_buffer;
3450
3451 /*
3452 * Check if we are at the end of the buffer.
3453 */
3454 if (iter->head >= rb_page_size(iter->head_page)) {
3455 /* discarded commits can make the page empty */
3456 if (iter->head_page == cpu_buffer->commit_page)
3457 return;
3458 rb_inc_iter(iter);
3459 return;
3460 }
3461
3462 event = rb_iter_head_event(iter);
3463
3464 length = rb_event_length(event);
3465
3466 /*
3467 * This should not be called to advance the header if we are
3468 * at the tail of the buffer.
3469 */
3470 if (RB_WARN_ON(cpu_buffer,
3471 (iter->head_page == cpu_buffer->commit_page) &&
3472 (iter->head + length > rb_commit_index(cpu_buffer))))
3473 return;
3474
3475 rb_update_iter_read_stamp(iter, event);
3476
3477 iter->head += length;
3478
3479 /* check for end of page padding */
3480 if ((iter->head >= rb_page_size(iter->head_page)) &&
3481 (iter->head_page != cpu_buffer->commit_page))
3482 rb_inc_iter(iter);
3483 }
3484
3485 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3486 {
3487 return cpu_buffer->lost_events;
3488 }
3489
3490 static struct ring_buffer_event *
3491 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3492 unsigned long *lost_events)
3493 {
3494 struct ring_buffer_event *event;
3495 struct buffer_page *reader;
3496 int nr_loops = 0;
3497
3498 again:
3499 /*
3500 * We repeat when a time extend is encountered.
3501 * Since the time extend is always attached to a data event,
3502 * we should never loop more than once.
3503 * (We never hit the following condition more than twice).
3504 */
3505 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3506 return NULL;
3507
3508 reader = rb_get_reader_page(cpu_buffer);
3509 if (!reader)
3510 return NULL;
3511
3512 event = rb_reader_event(cpu_buffer);
3513
3514 switch (event->type_len) {
3515 case RINGBUF_TYPE_PADDING:
3516 if (rb_null_event(event))
3517 RB_WARN_ON(cpu_buffer, 1);
3518 /*
3519 * Because the writer could be discarding every
3520 * event it creates (which would probably be bad),
3521 * if we were to go back to "again" then we may never
3522 * catch up, and will trigger the warn on, or lock
3523 * the box. Return the padding, and we will release
3524 * the current locks, and try again.
3525 */
3526 return event;
3527
3528 case RINGBUF_TYPE_TIME_EXTEND:
3529 /* Internal data, OK to advance */
3530 rb_advance_reader(cpu_buffer);
3531 goto again;
3532
3533 case RINGBUF_TYPE_TIME_STAMP:
3534 /* FIXME: not implemented */
3535 rb_advance_reader(cpu_buffer);
3536 goto again;
3537
3538 case RINGBUF_TYPE_DATA:
3539 if (ts) {
3540 *ts = cpu_buffer->read_stamp + event->time_delta;
3541 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3542 cpu_buffer->cpu, ts);
3543 }
3544 if (lost_events)
3545 *lost_events = rb_lost_events(cpu_buffer);
3546 return event;
3547
3548 default:
3549 BUG();
3550 }
3551
3552 return NULL;
3553 }
3554 EXPORT_SYMBOL_GPL(ring_buffer_peek);
3555
3556 static struct ring_buffer_event *
3557 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3558 {
3559 struct ring_buffer *buffer;
3560 struct ring_buffer_per_cpu *cpu_buffer;
3561 struct ring_buffer_event *event;
3562 int nr_loops = 0;
3563
3564 cpu_buffer = iter->cpu_buffer;
3565 buffer = cpu_buffer->buffer;
3566
3567 /*
3568 * Check if someone performed a consuming read to
3569 * the buffer. A consuming read invalidates the iterator
3570 * and we need to reset the iterator in this case.
3571 */
3572 if (unlikely(iter->cache_read != cpu_buffer->read ||
3573 iter->cache_reader_page != cpu_buffer->reader_page))
3574 rb_iter_reset(iter);
3575
3576 again:
3577 if (ring_buffer_iter_empty(iter))
3578 return NULL;
3579
3580 /*
3581 * We repeat when a time extend is encountered.
3582 * Since the time extend is always attached to a data event,
3583 * we should never loop more than once.
3584 * (We never hit the following condition more than twice).
3585 */
3586 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3587 return NULL;
3588
3589 if (rb_per_cpu_empty(cpu_buffer))
3590 return NULL;
3591
3592 if (iter->head >= local_read(&iter->head_page->page->commit)) {
3593 rb_inc_iter(iter);
3594 goto again;
3595 }
3596
3597 event = rb_iter_head_event(iter);
3598
3599 switch (event->type_len) {
3600 case RINGBUF_TYPE_PADDING:
3601 if (rb_null_event(event)) {
3602 rb_inc_iter(iter);
3603 goto again;
3604 }
3605 rb_advance_iter(iter);
3606 return event;
3607
3608 case RINGBUF_TYPE_TIME_EXTEND:
3609 /* Internal data, OK to advance */
3610 rb_advance_iter(iter);
3611 goto again;
3612
3613 case RINGBUF_TYPE_TIME_STAMP:
3614 /* FIXME: not implemented */
3615 rb_advance_iter(iter);
3616 goto again;
3617
3618 case RINGBUF_TYPE_DATA:
3619 if (ts) {
3620 *ts = iter->read_stamp + event->time_delta;
3621 ring_buffer_normalize_time_stamp(buffer,
3622 cpu_buffer->cpu, ts);
3623 }
3624 return event;
3625
3626 default:
3627 BUG();
3628 }
3629
3630 return NULL;
3631 }
3632 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3633
3634 static inline int rb_ok_to_lock(void)
3635 {
3636 /*
3637 * If an NMI die dumps out the content of the ring buffer
3638 * do not grab locks. We also permanently disable the ring
3639 * buffer too. A one time deal is all you get from reading
3640 * the ring buffer from an NMI.
3641 */
3642 if (likely(!in_nmi()))
3643 return 1;
3644
3645 tracing_off_permanent();
3646 return 0;
3647 }
3648
3649 /**
3650 * ring_buffer_peek - peek at the next event to be read
3651 * @buffer: The ring buffer to read
3652 * @cpu: The cpu to peek at
3653 * @ts: The timestamp counter of this event.
3654 * @lost_events: a variable to store if events were lost (may be NULL)
3655 *
3656 * This will return the event that will be read next, but does
3657 * not consume the data.
3658 */
3659 struct ring_buffer_event *
3660 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3661 unsigned long *lost_events)
3662 {
3663 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3664 struct ring_buffer_event *event;
3665 unsigned long flags;
3666 int dolock;
3667
3668 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3669 return NULL;
3670
3671 dolock = rb_ok_to_lock();
3672 again:
3673 local_irq_save(flags);
3674 if (dolock)
3675 raw_spin_lock(&cpu_buffer->reader_lock);
3676 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3677 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3678 rb_advance_reader(cpu_buffer);
3679 if (dolock)
3680 raw_spin_unlock(&cpu_buffer->reader_lock);
3681 local_irq_restore(flags);
3682
3683 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3684 goto again;
3685
3686 return event;
3687 }
3688
3689 /**
3690 * ring_buffer_iter_peek - peek at the next event to be read
3691 * @iter: The ring buffer iterator
3692 * @ts: The timestamp counter of this event.
3693 *
3694 * This will return the event that will be read next, but does
3695 * not increment the iterator.
3696 */
3697 struct ring_buffer_event *
3698 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3699 {
3700 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3701 struct ring_buffer_event *event;
3702 unsigned long flags;
3703
3704 again:
3705 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3706 event = rb_iter_peek(iter, ts);
3707 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3708
3709 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3710 goto again;
3711
3712 return event;
3713 }
3714
3715 /**
3716 * ring_buffer_consume - return an event and consume it
3717 * @buffer: The ring buffer to get the next event from
3718 * @cpu: the cpu to read the buffer from
3719 * @ts: a variable to store the timestamp (may be NULL)
3720 * @lost_events: a variable to store if events were lost (may be NULL)
3721 *
3722 * Returns the next event in the ring buffer, and that event is consumed.
3723 * Meaning, that sequential reads will keep returning a different event,
3724 * and eventually empty the ring buffer if the producer is slower.
3725 */
3726 struct ring_buffer_event *
3727 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3728 unsigned long *lost_events)
3729 {
3730 struct ring_buffer_per_cpu *cpu_buffer;
3731 struct ring_buffer_event *event = NULL;
3732 unsigned long flags;
3733 int dolock;
3734
3735 dolock = rb_ok_to_lock();
3736
3737 again:
3738 /* might be called in atomic */
3739 preempt_disable();
3740
3741 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3742 goto out;
3743
3744 cpu_buffer = buffer->buffers[cpu];
3745 local_irq_save(flags);
3746 if (dolock)
3747 raw_spin_lock(&cpu_buffer->reader_lock);
3748
3749 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3750 if (event) {
3751 cpu_buffer->lost_events = 0;
3752 rb_advance_reader(cpu_buffer);
3753 }
3754
3755 if (dolock)
3756 raw_spin_unlock(&cpu_buffer->reader_lock);
3757 local_irq_restore(flags);
3758
3759 out:
3760 preempt_enable();
3761
3762 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3763 goto again;
3764
3765 return event;
3766 }
3767 EXPORT_SYMBOL_GPL(ring_buffer_consume);
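
/*
 * An illustrative sketch of a consuming read loop on one cpu buffer; the
 * handle_event() callback and the function name are hypothetical.
 */
static void example_drain_cpu(struct ring_buffer *buffer, int cpu,
			      void (*handle_event)(void *data, unsigned len, u64 ts))
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)) != NULL) {
		if (lost)
			pr_warn("cpu%d: %lu events lost\n", cpu, lost);
		handle_event(ring_buffer_event_data(event),
			     ring_buffer_event_length(event), ts);
	}
}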
3768
3769 /**
3770 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
3771 * @buffer: The ring buffer to read from
3772 * @cpu: The cpu buffer to iterate over
3773 *
3774 * This performs the initial preparations necessary to iterate
3775 * through the buffer. Memory is allocated, buffer recording
3776 * is disabled, and the iterator pointer is returned to the caller.
3777 *
3778 * Disabling buffer recording prevents the reading from being
3779 * corrupted. This is not a consuming read, so a producer is not
3780 * expected.
3781 *
3782 * After a sequence of ring_buffer_read_prepare calls, the user is
3783 * expected to make at least one call to ring_buffer_prepare_sync.
3784 * Afterwards, ring_buffer_read_start is invoked to get things going
3785 * for real.
3786 *
3787 * This overall must be paired with ring_buffer_read_finish.
3788 */
3789 struct ring_buffer_iter *
3790 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
3791 {
3792 struct ring_buffer_per_cpu *cpu_buffer;
3793 struct ring_buffer_iter *iter;
3794
3795 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3796 return NULL;
3797
3798 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3799 if (!iter)
3800 return NULL;
3801
3802 cpu_buffer = buffer->buffers[cpu];
3803
3804 iter->cpu_buffer = cpu_buffer;
3805
3806 atomic_inc(&buffer->resize_disabled);
3807 atomic_inc(&cpu_buffer->record_disabled);
3808
3809 return iter;
3810 }
3811 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
3812
3813 /**
3814 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
3815 *
3816 * All previously invoked ring_buffer_read_prepare calls to prepare
3817 * iterators will be synchronized. Afterwards, ring_buffer_read_start
3818 * calls on those iterators are allowed.
3819 */
3820 void
3821 ring_buffer_read_prepare_sync(void)
3822 {
3823 synchronize_sched();
3824 }
3825 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
3826
3827 /**
3828 * ring_buffer_read_start - start a non consuming read of the buffer
3829 * @iter: The iterator returned by ring_buffer_read_prepare
3830 *
3831 * This finalizes the startup of an iteration through the buffer.
3832 * The iterator comes from a call to ring_buffer_read_prepare and
3833 * an intervening ring_buffer_read_prepare_sync must have been
3834 * performed.
3835 *
3836 * Must be paired with ring_buffer_read_finish.
3837 */
3838 void
3839 ring_buffer_read_start(struct ring_buffer_iter *iter)
3840 {
3841 struct ring_buffer_per_cpu *cpu_buffer;
3842 unsigned long flags;
3843
3844 if (!iter)
3845 return;
3846
3847 cpu_buffer = iter->cpu_buffer;
3848
3849 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3850 arch_spin_lock(&cpu_buffer->lock);
3851 rb_iter_reset(iter);
3852 arch_spin_unlock(&cpu_buffer->lock);
3853 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3854 }
3855 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
3856
3857 /**
3858 * ring_buffer_read_finish - finish reading the iterator of the buffer
3859 * @iter: The iterator retrieved by ring_buffer_read_prepare
3860 *
3861 * This re-enables the recording to the buffer, and frees the
3862 * iterator.
3863 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	/*
	 * Ring buffer is disabled from recording, here's a good place
	 * to check the integrity of the ring buffer.
	 * Must prevent readers from trying to read, as the check
	 * clears the HEAD page and readers require it.
	 */
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	rb_check_pages(cpu_buffer);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->record_disabled);
	atomic_dec(&cpu_buffer->buffer->resize_disabled);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);

/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 again:
	event = rb_iter_peek(iter, ts);
	if (!event)
		goto out;

	if (event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	rb_advance_iter(iter);
 out:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);

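/*
 * Example (sketch, assuming "buffer" and "cpu" come from the caller
 * and consume_event() is a hypothetical helper): a complete
 * non-consuming read of one CPU buffer using the API above.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu);
 *	if (!iter)
 *		return;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		consume_event(event, ts);
 *
 *	ring_buffer_read_finish(iter);
 */
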
/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 * @cpu: The CPU to get the ring buffer size of.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
{
	/*
	 * Earlier, this function returned
	 *	BUF_PAGE_SIZE * buffer->nr_pages
	 * Since the nr_pages field is now removed, it returns the
	 * per cpu buffer's value instead.
	 */
	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);

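/*
 * Example (sketch): since the size is now per CPU, a caller that wants
 * the old whole-buffer total has to sum the per-cpu values itself:
 *
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		total += ring_buffer_size(buffer, cpu);
 */
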
static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	rb_head_page_deactivate(cpu_buffer);

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->entries, 0);
	local_set(&cpu_buffer->head_page->page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	local_set(&cpu_buffer->entries_bytes, 0);
	local_set(&cpu_buffer->overrun, 0);
	local_set(&cpu_buffer->commit_overrun, 0);
	local_set(&cpu_buffer->dropped_events, 0);
	local_set(&cpu_buffer->entries, 0);
	local_set(&cpu_buffer->committing, 0);
	local_set(&cpu_buffer->commits, 0);
	cpu_buffer->read = 0;
	cpu_buffer->read_bytes = 0;

	cpu_buffer->write_stamp = 0;
	cpu_buffer->read_stamp = 0;

	cpu_buffer->lost_events = 0;
	cpu_buffer->last_overrun = 0;

	rb_head_page_activate(cpu_buffer);
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	atomic_inc(&buffer->resize_disabled);
	atomic_inc(&cpu_buffer->record_disabled);

	/* Make sure all commits have finished */
	synchronize_sched();

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
		goto out;

	arch_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	arch_spin_unlock(&cpu_buffer->lock);

 out:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->record_disabled);
	atomic_dec(&buffer->resize_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int cpu;
	int ret;

	dolock = rb_ok_to_lock();

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		local_irq_save(flags);
		if (dolock)
			raw_spin_lock(&cpu_buffer->reader_lock);
		ret = rb_per_cpu_empty(cpu_buffer);
		if (dolock)
			raw_spin_unlock(&cpu_buffer->reader_lock);
		local_irq_restore(flags);

		if (!ret)
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 1;

	dolock = rb_ok_to_lock();

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		raw_spin_lock(&cpu_buffer->reader_lock);
	ret = rb_per_cpu_empty(cpu_buffer);
	if (dolock)
		raw_spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU of the buffers to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;
	int ret = -EINVAL;

	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		goto out;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	/* At least make sure the two buffers are somewhat the same */
	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
		goto out;

	ret = -EAGAIN;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		goto out;

	if (atomic_read(&buffer_a->record_disabled))
		goto out;

	if (atomic_read(&buffer_b->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_a->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_b->record_disabled))
		goto out;

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	ret = -EBUSY;
	if (local_read(&cpu_buffer_a->committing))
		goto out_dec;
	if (local_read(&cpu_buffer_b->committing))
		goto out_dec;

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	ret = 0;

 out_dec:
	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);
 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */

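/*
 * Example (sketch): the "snapshot" pattern the comment above refers to.
 * A tracer keeps a spare buffer of the same size (snapshot_buffer and
 * live_buffer are hypothetical names) and swaps one CPU into it, so
 * tracing continues into the spare while the frozen copy is inspected.
 *
 *	int err;
 *
 *	err = ring_buffer_swap_cpu(live_buffer, snapshot_buffer, cpu);
 *	if (err)
 *		return err;	(swap refused: -EINVAL, -EAGAIN or -EBUSY)
 *
 *	(read snapshot_buffer at leisure; live_buffer keeps recording)
 */
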
/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 * @cpu: the cpu buffer to allocate the page for
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or NULL on error.
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
{
	struct buffer_data_page *bpage;
	struct page *page;

	page = alloc_pages_node(cpu_to_node(cpu),
				GFP_KERNEL | __GFP_NORETRY, 0);
	if (!page)
		return NULL;

	bpage = page_address(page);

	rb_init_page(bpage);

	return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);

/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
{
	free_page((unsigned long)data);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);

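/*
 * Example (sketch, with process_page() standing in for whatever the
 * caller does with the data): the intended alloc/read/free cycle for
 * page-at-a-time reads.
 *
 *	void *rpage;
 *	int ret;
 *
 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (!rpage)
 *		return -ENOMEM;
 *
 *	while ((ret = ring_buffer_read_page(buffer, &rpage,
 *					    PAGE_SIZE, cpu, 0)) >= 0)
 *		process_page(rpage, ret);
 *
 *	ring_buffer_free_read_page(buffer, rpage);
 */
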
/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (!rpage)
 *		return error;
 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 * When @full is set, the read will only succeed if the writer is off
 * the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  >=0 if data has been transferred, returns the offset of consumed data.
 *  <0 if no data has been transferred.
 */
int ring_buffer_read_page(struct ring_buffer *buffer,
			  void **data_page, size_t len, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	struct buffer_page *reader;
	unsigned long missed_events;
	unsigned long flags;
	unsigned int commit;
	unsigned int read;
	u64 save_timestamp;
	int ret = -1;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	/*
	 * If len is not big enough to hold the page header, then
	 * we can not copy anything.
	 */
	if (len <= BUF_PAGE_HDR_SIZE)
		goto out;

	len -= BUF_PAGE_HDR_SIZE;

	if (!data_page)
		goto out;

	bpage = *data_page;
	if (!bpage)
		goto out;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		goto out_unlock;

	event = rb_reader_event(cpu_buffer);

	read = reader->read;
	commit = rb_page_commit(reader);

	/* Check if any events were dropped */
	missed_events = cpu_buffer->lost_events;

	/*
	 * If this page has been partially read or
	 * if len is not big enough to read the rest of the page or
	 * a writer is still on the page, then
	 * we must copy the data from the page to the buffer.
	 * Otherwise, we can simply swap the page with the one passed in.
	 */
	if (read || (len < (commit - read)) ||
	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
		unsigned int rpos = read;
		unsigned int pos = 0;
		unsigned int size;

		if (full)
			goto out_unlock;

		if (len > (commit - read))
			len = (commit - read);

		/* Always keep the time extend and data together */
		size = rb_event_ts_length(event);

		if (len < size)
			goto out_unlock;

		/* save the current timestamp, since the user will need it */
		save_timestamp = cpu_buffer->read_stamp;

		/* Need to copy one event at a time */
		do {
			/*
			 * We need the size of one event, because
			 * rb_advance_reader only advances by one event,
			 * whereas rb_event_ts_length may include the size of
			 * one or two events.
			 * We have already ensured there's enough space if this
			 * is a time extend.
			 */
			size = rb_event_length(event);
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			if (rpos >= commit)
				break;

			event = rb_reader_event(cpu_buffer);
			/* Always keep the time extend and data together */
			size = rb_event_ts_length(event);
		} while (len >= size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* update the entry counter */
		cpu_buffer->read += rb_page_entries(reader);
		cpu_buffer->read_bytes += BUF_PAGE_SIZE;

		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = *data_page;
		local_set(&reader->write, 0);
		local_set(&reader->entries, 0);
		reader->read = 0;
		*data_page = bpage;

		/*
		 * Use the real_end for the data size,
		 * This gives us a chance to store the lost events
		 * on the page.
		 */
		if (reader->real_end)
			local_set(&bpage->commit, reader->real_end);
	}
	ret = read;

	cpu_buffer->lost_events = 0;

	commit = local_read(&bpage->commit);
	/*
	 * Set a flag in the commit field if we lost events
	 */
	if (missed_events) {
		/*
		 * If there is room at the end of the page to save the
		 * missed events, then record it there.
		 */
		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
			memcpy(&bpage->data[commit], &missed_events,
			       sizeof(missed_events));
			local_add(RB_MISSED_STORED, &bpage->commit);
			commit += sizeof(missed_events);
		}
		local_add(RB_MISSED_EVENTS, &bpage->commit);
	}

	/*
	 * This page may be off to user land. Zero it out here.
	 */
	if (commit < BUF_PAGE_SIZE)
		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);

 out_unlock:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);

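/*
 * Example (sketch): how a consumer can recover the lost-event
 * information encoded above. struct buffer_data_page is private to
 * this file, so real callers need their own view of the page layout;
 * the idea, using the same RB_MISSED_* flag bits, is:
 *
 *	unsigned long commit = local_read(&bpage->commit);
 *
 *	if (commit & RB_MISSED_EVENTS) {
 *		(events were dropped before this page)
 *		if (commit & RB_MISSED_STORED)
 *			(the dropped-event count was appended
 *			 after the page's data)
 *	}
 */
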
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	struct ring_buffer *buffer =
		container_of(self, struct ring_buffer, cpu_notify);
	long cpu = (long)hcpu;
	int cpu_i, nr_pages_same;
	unsigned int nr_pages;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (cpumask_test_cpu(cpu, buffer->cpumask))
			return NOTIFY_OK;

		nr_pages = 0;
		nr_pages_same = 1;
		/* check if all cpu sizes are same */
		for_each_buffer_cpu(buffer, cpu_i) {
			/* fill in the size from first enabled cpu */
			if (nr_pages == 0)
				nr_pages = buffer->buffers[cpu_i]->nr_pages;
			if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
				nr_pages_same = 0;
				break;
			}
		}
		/* allocate minimum pages, user can later expand it */
		if (!nr_pages_same)
			nr_pages = 2;
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
		if (!buffer->buffers[cpu]) {
			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
			     cpu);
			return NOTIFY_OK;
		}
		smp_wmb();
		cpumask_set_cpu(cpu, buffer->cpumask);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Do nothing.
		 * If we were to free the buffer, then the user would
		 * lose any trace that was in the buffer.
		 */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif