kernel/trace/ring_buffer.c
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/sched/clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>

#include <asm/local.h>

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	trace_seq_puts(s, "# compressed entry header\n");
	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
	trace_seq_puts(s, "\tarray       :   32 bits\n");
	trace_seq_putc(s, '\n');
	trace_seq_printf(s, "\tpadding     : type == %d\n",
			 RINGBUF_TYPE_PADDING);
	trace_seq_printf(s, "\ttime_extend : type == %d\n",
			 RINGBUF_TYPE_TIME_EXTEND);
	trace_seq_printf(s, "\tdata max type_len  == %d\n",
			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return !trace_seq_has_overflowed(s);
}
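
/*
 * Worked example (illustrative, not from the original source): with the
 * layout printed above, a header holding type_len == 2 describes
 * 2 * RB_ALIGNMENT == 8 bytes of payload starting at array[0], and its
 * 27-bit time_delta is added to the running page timestamp:
 *
 *	event->type_len   = 2;	// 8 bytes of data follow the header
 *	event->time_delta = 100;	// 100 clock units after the previous event
 *
 * Payloads larger than RB_MAX_SMALL_DATA set type_len == 0 and put the
 * byte count in array[0] instead (see rb_event_data_length() below).
 */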

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do what
 * ever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */
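
/*
 * Illustrative sketch (not part of the original file): a consumer taking
 * whole pages, the way the splice code does. ring_buffer_read_page()
 * performs the reader-page swap drawn above once the page is full;
 * "buffer" and "cpu" are assumed to come from the caller.
 *
 *	void *page = ring_buffer_alloc_read_page(buffer, cpu);
 *
 *	if (!IS_ERR(page)) {
 *		if (ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 1) >= 0)
 *			;	// page now holds a buffer_data_page of events
 *		ring_buffer_free_read_page(buffer, cpu, page);
 *	}
 */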

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}
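
/*
 * Worked example (illustrative): for type_len == 3 the payload is
 * 3 * RB_ALIGNMENT == 12 bytes starting at array[0], so the function
 * above returns 12 + RB_EVNT_HDR_SIZE == 16. For a large event,
 * type_len == 0 and the payload starts at array[1]; array[0] then counts
 * everything past the header (its own 4 bytes plus the payload), so the
 * total is still array[0] + RB_EVNT_HDR_SIZE.
 */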

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static __always_inline void *
rb_event_data(struct ring_buffer_event *event)
{
	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
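
/*
 * Illustrative sketch (not part of the original file): draining one cpu
 * buffer with the two accessors above plus ring_buffer_consume();
 * "buffer" and "cpu" are assumed to exist in the caller.
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, NULL))) {
 *		void *payload = ring_buffer_event_data(event);
 *		unsigned len = ring_buffer_event_length(event);
 *
 *		// len bytes at payload are valid; ts is the event timestamp
 *	}
 */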

#define for_each_buffer_cpu(buffer, cpu)	\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

struct buffer_data_page {
	u64		time_stamp;	/* page time stamp */
	local_t		commit;		/* write committed index */
	unsigned char	data[] RB_ALIGN_DATA;	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)
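
/*
 * Worked example (illustrative): if ->write holds 0x00300014, the low
 * 20 bits (0x14) are the write index on the page and the high bits
 * (3 << 20) mean three nested updaters have added RB_WRITE_INTCNT.
 * Masking with RB_WRITE_MASK recovers the index:
 *
 *	unsigned long w = 0x00300014;
 *	unsigned long index = w & RB_WRITE_MASK;	// 0x14
 */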

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}
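
/*
 * Worked example (illustrative): TS_DELTA_TEST is the complement of the
 * low 27 bits, so test_time_stamp() returns 0 for a delta of
 * (1ULL << 27) - 1 (it still fits) and 1 for a delta of 1ULL << 27,
 * which forces the writer to emit a time-extend event instead of a
 * bare 27-bit delta.
 */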

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;

	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			 "offset:0;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)sizeof(field.time_stamp),
			 (unsigned int)is_signed_type(u64));

	trace_seq_printf(s, "\tfield: local_t commit;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 (unsigned int)sizeof(field.commit),
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: int overwrite;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 1,
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: char data;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), data),
			 (unsigned int)BUF_PAGE_SIZE,
			 (unsigned int)is_signed_type(char));

	return !trace_seq_has_overflowed(s);
}

struct rb_irq_work {
	struct irq_work		work;
	wait_queue_head_t	waiters;
	wait_queue_head_t	full_waiters;
	bool			waiters_pending;
	bool			full_waiters_pending;
	bool			wakeup_full;
};

/*
 * Structure to hold event state and handle nested events.
 */
struct rb_event_info {
	u64			ts;
	u64			delta;
	unsigned long		length;
	struct buffer_page	*tail_page;
	int			add_timestamp;
};

/*
 * Used for which event context the event is in.
 *  NMI     = 0
 *  IRQ     = 1
 *  SOFTIRQ = 2
 *  NORMAL  = 3
 *
 * See trace_recursive_lock() comment below for more details.
 */
enum {
	RB_CTX_NMI,
	RB_CTX_IRQ,
	RB_CTX_SOFTIRQ,
	RB_CTX_NORMAL,
	RB_CTX_MAX
};

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	struct ring_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct buffer_data_page		*free_page;
	unsigned long			nr_pages;
	unsigned int			current_context;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	unsigned long			read;
	unsigned long			read_bytes;
	u64				write_stamp;
	u64				read_stamp;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	long				nr_pages_to_update;
	struct list_head		new_pages;	/* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;

	struct rb_irq_work		irq_work;
};

struct ring_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

	struct hlist_node		node;
	u64				(*clock)(void);

	struct rb_irq_work		irq_work;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
};

/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	wake_up_all(&rbwork->waiters);
	if (rbwork->wakeup_full) {
		rbwork->wakeup_full = false;
		wake_up_all(&rbwork->full_waiters);
	}
}

/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
{
	struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
	DEFINE_WAIT(wait);
	struct rb_irq_work *work;
	int ret = 0;

	/*
	 * Depending on what the caller is waiting for, either any
	 * data in any cpu buffer, or a specific buffer, put the
	 * caller on the appropriate wait queue.
	 */
	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		/* Full only makes sense on per cpu reads */
		full = false;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -ENODEV;
		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}


	while (true) {
		if (full)
			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
		else
			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

		/*
		 * The events can happen in critical sections where
		 * checking a work queue can cause deadlocks.
		 * After adding a task to the queue, this flag is set
		 * only to notify events to try to wake up the queue
		 * using irq_work.
		 *
		 * We don't clear it even if the buffer is no longer
		 * empty. The flag only causes the next event to run
		 * irq_work to do the work queue wake up. The worst
		 * that can happen if we race with !trace_empty() is that
		 * an event will cause an irq_work to try to wake up
		 * an empty queue.
		 *
		 * There's no reason to protect this flag either, as
		 * the work queue and irq_work logic will do the necessary
		 * synchronization for the wake ups. The only thing
		 * that is necessary is that the wake up happens after
		 * a task has been queued. It's OK for spurious wake ups.
		 */
		if (full)
			work->full_waiters_pending = true;
		else
			work->waiters_pending = true;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
			break;

		if (cpu != RING_BUFFER_ALL_CPUS &&
		    !ring_buffer_empty_cpu(buffer, cpu)) {
			unsigned long flags;
			bool pagebusy;

			if (!full)
				break;

			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

			if (!pagebusy)
				break;
		}

		schedule();
	}

	if (full)
		finish_wait(&work->full_waiters, &wait);
	else
		finish_wait(&work->waiters, &wait);

	return ret;
}
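
/*
 * Illustrative sketch (not part of the original file): blocking until
 * cpu 0 has any data, then until it has a full page; "buffer" is assumed
 * to exist in the caller.
 *
 *	if (ring_buffer_wait(buffer, 0, false) == 0)
 *		;	// some data is available on cpu 0
 *
 *	if (ring_buffer_wait(buffer, 0, true) == 0)
 *		;	// a full page is ready, e.g. for ring_buffer_read_page()
 */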

/**
 * ring_buffer_poll_wait - poll on buffer input
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 *
 * Returns POLLIN | POLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
			  struct file *filp, poll_table *poll_table)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *work;

	if (cpu == RING_BUFFER_ALL_CPUS)
		work = &buffer->irq_work;
	else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -EINVAL;

		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	poll_wait(filp, &work->waiters, poll_table);
	work->waiters_pending = true;
	/*
	 * There's a tight race between setting the waiters_pending and
	 * checking if the ring buffer is empty. Once the waiters_pending bit
	 * is set, the next event will wake the task up, but we can get stuck
	 * if there's only a single event in.
	 *
	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
	 * but adding a memory barrier to all events will cause too much of a
	 * performance hit in the fast path. We only need a memory barrier when
	 * the buffer goes from empty to having content. But as this race is
	 * extremely small, and it's not a problem if another event comes in, we
	 * will fix it later.
	 */
	smp_mb();

	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
		return POLLIN | POLLRDNORM;
	return 0;
}
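
/*
 * Illustrative sketch (not part of the original file): how a file's
 * ->poll() handler might forward to the helper above. The names
 * my_trace_poll, my_buffer and my_cpu are hypothetical; only
 * ring_buffer_poll_wait() comes from this file.
 *
 *	static unsigned int my_trace_poll(struct file *filp, poll_table *wait)
 *	{
 *		return ring_buffer_poll_wait(my_buffer, my_cpu, filp, wait);
 *	}
 */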

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that they are on, so they
 * only need to worry about interrupts. Reads, however, can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1   bit 0
 *                              ------- -------
 * Normal page                     0       0
 * Points to head page             0       1
 * New head page                   1       0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                          ^ |
 *   |          +-----+         | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL
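
/*
 * Worked example (illustrative): buffer pages are cache-line aligned, so
 * the two low bits of a ->next pointer are free to carry the flags above.
 * A value of (&page->list | RB_PAGE_HEAD) both links to the page and
 * marks it as the head; rb_list_head() below strips the flags again:
 *
 *	unsigned long val = (unsigned long)&head->list | RB_PAGE_HEAD;
 *	struct list_head *clean = (struct list_head *)(val & ~RB_FLAG_MASK);
 */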

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static bool rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
				struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(cpu_buffer, head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}
889
890 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
891 struct buffer_page *head,
892 struct buffer_page *prev,
893 int old_flag, int new_flag)
894 {
895 struct list_head *list;
896 unsigned long val = (unsigned long)&head->list;
897 unsigned long ret;
898
899 list = &prev->list;
900
901 val &= ~RB_FLAG_MASK;
902
903 ret = cmpxchg((unsigned long *)&list->next,
904 val | old_flag, val | new_flag);
905
906 /* check if the reader took the page */
907 if ((ret & ~RB_FLAG_MASK) != val)
908 return RB_PAGE_MOVED;
909
910 return ret & RB_FLAG_MASK;
911 }
912
913 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
914 struct buffer_page *head,
915 struct buffer_page *prev,
916 int old_flag)
917 {
918 return rb_head_page_set(cpu_buffer, head, prev,
919 old_flag, RB_PAGE_UPDATE);
920 }
921
922 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
923 struct buffer_page *head,
924 struct buffer_page *prev,
925 int old_flag)
926 {
927 return rb_head_page_set(cpu_buffer, head, prev,
928 old_flag, RB_PAGE_HEAD);
929 }
930
931 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
932 struct buffer_page *head,
933 struct buffer_page *prev,
934 int old_flag)
935 {
936 return rb_head_page_set(cpu_buffer, head, prev,
937 old_flag, RB_PAGE_NORMAL);
938 }
939
940 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
941 struct buffer_page **bpage)
942 {
943 struct list_head *p = rb_list_head((*bpage)->list.next);
944
945 *bpage = list_entry(p, struct buffer_page, list);
946 }

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(cpu_buffer, &page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}

/*
 * rb_tail_page_update - move the tail page forward
 */
static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
				struct buffer_page *tail_page,
				struct buffer_page *next_page)
{
	unsigned long old_entries;
	unsigned long old_write;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * It only can increment when a commit takes place. But that
		 * only happens in the outer most nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		/* Again, either we update tail_page or an interrupt does */
		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
	}
}

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/**
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
			 struct list_head *list)
{
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
		return 1;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
		return 1;
	return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	/* Reset the head page if it exists */
	if (cpu_buffer->head_page)
		rb_set_head_page(cpu_buffer);

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}

static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
{
	struct buffer_page *bpage, *tmp;
	long i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		/*
		 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
		 * gracefully without invoking oom-killer and the system is not
		 * destabilized.
		 */
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     GFP_KERNEL | __GFP_RETRY_MAYFAIL,
				     cpu_to_node(cpu));
		if (!bpage)
			goto free_pages;

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_RETRY_MAYFAIL, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);
	}

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}

	return -ENOMEM;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned long nr_pages)
{
	LIST_HEAD(pages);

	WARN_ON(!nr_pages);

	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_done);
	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	rb_head_page_deactivate(cpu_buffer);

	if (head) {
		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}

/**
 * __ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					struct lock_class_key *key)
{
	struct ring_buffer *buffer;
	long nr_pages;
	int bsize;
	int cpu;
	int ret;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&buffer->irq_work.waiters);

	/* need at least two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	cpu = raw_smp_processor_id();
	cpumask_set_cpu(cpu, buffer->cpumask);
	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
	if (!buffer->buffers[cpu])
		goto fail_free_buffers;

	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
	if (ret < 0)
		goto fail_free_buffers;

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
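
/*
 * Illustrative sketch (not part of the original file): callers normally
 * go through the ring_buffer_alloc() wrapper from <linux/ring_buffer.h>,
 * which supplies the lock_class_key for lockdep:
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);	// ~1MB per cpu
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 */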

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}

static int
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
{
	struct list_head *tail_page, *to_remove, *next_page;
	struct buffer_page *to_remove_page, *tmp_iter_page;
	struct buffer_page *last_page, *first_page;
	unsigned long nr_removed;
	unsigned long head_bit;
	int page_entries;

	head_bit = 0;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	atomic_inc(&cpu_buffer->record_disabled);
	/*
	 * We don't race with the readers since we have acquired the reader
	 * lock. We also don't race with writers after disabling recording.
	 * This makes it easy to figure out the first and the last page to be
	 * removed from the list. We unlink all the pages in between including
	 * the first and last pages. This is done in a busy loop so that we
	 * lose the least number of traces.
	 * The pages are freed after we restart recording and unlock readers.
	 */
	tail_page = &cpu_buffer->tail_page->list;

	/*
	 * tail page might be on reader page, we remove the next page
	 * from the ring buffer
	 */
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		tail_page = rb_list_head(tail_page->next);
	to_remove = tail_page;

	/* start of pages to remove */
	first_page = list_entry(rb_list_head(to_remove->next),
				struct buffer_page, list);

	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
		to_remove = rb_list_head(to_remove)->next;
		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
	}

	next_page = rb_list_head(to_remove)->next;

	/*
	 * Now we remove all pages between tail_page and next_page.
	 * Make sure that we have head_bit value preserved for the
	 * next page
	 */
	tail_page->next = (struct list_head *)((unsigned long)next_page |
					       head_bit);
	next_page = rb_list_head(next_page);
	next_page->prev = tail_page;

	/* make sure pages points to a valid page in the ring buffer */
	cpu_buffer->pages = next_page;

	/* update head page */
	if (head_bit)
		cpu_buffer->head_page = list_entry(next_page,
						   struct buffer_page, list);

	/*
	 * change read pointer to make sure any read iterators reset
	 * themselves
	 */
	cpu_buffer->read = 0;

	/* pages are removed, resume tracing and then free the pages */
	atomic_dec(&cpu_buffer->record_disabled);
	raw_spin_unlock_irq(&cpu_buffer->reader_lock);

	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));

	/* last buffer page to remove */
	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
			       list);
	tmp_iter_page = first_page;

	do {
		to_remove_page = tmp_iter_page;
		rb_inc_page(cpu_buffer, &tmp_iter_page);

		/* update the counters */
		page_entries = rb_page_entries(to_remove_page);
		if (page_entries) {
			/*
			 * If something was added to this page, it was full
			 * since it is not the tail page. So we deduct the
			 * bytes consumed in ring buffer from here.
			 * Increment overrun to account for the lost events.
			 */
			local_add(page_entries, &cpu_buffer->overrun);
			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
		}

		/*
		 * We have already removed references to this list item, just
		 * free up the buffer_page and its page
		 */
		free_buffer_page(to_remove_page);
		nr_removed--;

	} while (to_remove_page != last_page);

	RB_WARN_ON(cpu_buffer, nr_removed);

	return nr_removed == 0;
}

static int
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *pages = &cpu_buffer->new_pages;
	int retries, success;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	/*
	 * We are holding the reader lock, so the reader page won't be swapped
	 * in the ring buffer. Now we are racing with the writer trying to
	 * move head page and the tail page.
	 * We are going to adapt the reader page update process where:
	 * 1. We first splice the start and end of list of new pages between
	 *    the head page and its previous page.
	 * 2. We cmpxchg the prev_page->next to point from head page to the
	 *    start of new pages list.
	 * 3. Finally, we update the head->prev to the end of new list.
	 *
	 * We will try this process 10 times, to make sure that we don't keep
	 * spinning.
	 */
	retries = 10;
	success = 0;
	while (retries--) {
		struct list_head *head_page, *prev_page, *r;
		struct list_head *last_page, *first_page;
		struct list_head *head_page_with_bit;

		head_page = &rb_set_head_page(cpu_buffer)->list;
		if (!head_page)
			break;
		prev_page = head_page->prev;

		first_page = pages->next;
		last_page  = pages->prev;

		head_page_with_bit = (struct list_head *)
				     ((unsigned long)head_page | RB_PAGE_HEAD);

		last_page->next = head_page_with_bit;
		first_page->prev = prev_page;

		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);

		if (r == head_page_with_bit) {
			/*
			 * yay, we replaced the page pointer to our new list,
			 * now, we just have to update to head page's prev
			 * pointer to point to end of list
			 */
			head_page->prev = last_page;
			success = 1;
			break;
		}
	}

	if (success)
		INIT_LIST_HEAD(pages);
	/*
	 * If we weren't successful in adding in new pages, warn and stop
	 * tracing
	 */
	RB_WARN_ON(cpu_buffer, !success);
	raw_spin_unlock_irq(&cpu_buffer->reader_lock);

	/* free pages if they weren't inserted */
	if (!success) {
		struct buffer_page *bpage, *tmp;
		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
					 list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
	}
	return success;
}

static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	int success;

	if (cpu_buffer->nr_pages_to_update > 0)
		success = rb_insert_pages(cpu_buffer);
	else
		success = rb_remove_pages(cpu_buffer,
					  -cpu_buffer->nr_pages_to_update);

	if (success)
		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
}

static void update_pages_handler(struct work_struct *work)
{
	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
			struct ring_buffer_per_cpu, update_pages_work);
	rb_update_pages(cpu_buffer);
	complete(&cpu_buffer->update_done);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 * @cpu_id: the cpu buffer to resize
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns the adjusted size (rounded up to a multiple of BUF_PAGE_SIZE)
 * on success and < 0 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
		       int cpu_id)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long nr_pages;
	int cpu, err = 0;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	/* Make sure the requested buffer exists */
	if (cpu_id != RING_BUFFER_ALL_CPUS &&
	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
		return size;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	/* we need a minimum of two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	size = nr_pages * BUF_PAGE_SIZE;

	/*
	 * Don't succeed if resizing is disabled, as a reader might be
	 * manipulating the ring buffer and is expecting a sane state while
	 * this is true.
	 */
	if (atomic_read(&buffer->resize_disabled))
		return -EBUSY;

	/* prevent another thread from changing buffer sizes */
	mutex_lock(&buffer->mutex);

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		/* calculate the pages to update */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];

			cpu_buffer->nr_pages_to_update = nr_pages -
							 cpu_buffer->nr_pages;
			/*
			 * nothing more to do for removing pages or no update
			 */
			if (cpu_buffer->nr_pages_to_update <= 0)
				continue;
			/*
			 * to add pages, make sure all new pages can be
			 * allocated without receiving ENOMEM
			 */
			INIT_LIST_HEAD(&cpu_buffer->new_pages);
			if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
						&cpu_buffer->new_pages, cpu)) {
				/* not enough memory for new pages */
				err = -ENOMEM;
				goto out_err;
			}
		}

		get_online_cpus();
		/*
		 * Fire off all the required work handlers
		 * We can't schedule on offline CPUs, but it's not necessary
		 * since we can change their buffer sizes without any race.
		 */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			if (!cpu_buffer->nr_pages_to_update)
				continue;

			/* Can't run something on an offline CPU. */
			if (!cpu_online(cpu)) {
				rb_update_pages(cpu_buffer);
				cpu_buffer->nr_pages_to_update = 0;
			} else {
				schedule_work_on(cpu,
						 &cpu_buffer->update_pages_work);
			}
		}

		/* wait for all the updates to complete */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			if (!cpu_buffer->nr_pages_to_update)
				continue;

			if (cpu_online(cpu))
				wait_for_completion(&cpu_buffer->update_done);
			cpu_buffer->nr_pages_to_update = 0;
		}

		put_online_cpus();
	} else {
		/* Make sure this CPU has been initialized */
		if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
			goto out;

		cpu_buffer = buffer->buffers[cpu_id];

		if (nr_pages == cpu_buffer->nr_pages)
			goto out;

		cpu_buffer->nr_pages_to_update = nr_pages -
						 cpu_buffer->nr_pages;

		INIT_LIST_HEAD(&cpu_buffer->new_pages);
		if (cpu_buffer->nr_pages_to_update > 0 &&
		    __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
					&cpu_buffer->new_pages, cpu_id)) {
			err = -ENOMEM;
			goto out_err;
		}

		get_online_cpus();

		/* Can't run something on an offline CPU. */
		if (!cpu_online(cpu_id))
			rb_update_pages(cpu_buffer);
		else {
			schedule_work_on(cpu_id,
					 &cpu_buffer->update_pages_work);
			wait_for_completion(&cpu_buffer->update_done);
		}

		cpu_buffer->nr_pages_to_update = 0;
		put_online_cpus();
	}

 out:
	/*
	 * The ring buffer resize can happen with the ring buffer
	 * enabled, so that the update disturbs the tracing as little
	 * as possible. But if the buffer is disabled, we do not need
	 * to worry about that, and we can take the time to verify
	 * that the buffer is not corrupt.
	 */
	if (atomic_read(&buffer->record_disabled)) {
		atomic_inc(&buffer->record_disabled);
		/*
		 * Even though the buffer was disabled, we must make sure
		 * that it is truly disabled before calling rb_check_pages.
		 * There could have been a race between checking
		 * record_disable and incrementing it.
		 */
		synchronize_sched();
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_check_pages(cpu_buffer);
		}
		atomic_dec(&buffer->record_disabled);
	}

	mutex_unlock(&buffer->mutex);
	return size;

 out_err:
	for_each_buffer_cpu(buffer, cpu) {
		struct buffer_page *bpage, *tmp;

		cpu_buffer = buffer->buffers[cpu];
		cpu_buffer->nr_pages_to_update = 0;

		if (list_empty(&cpu_buffer->new_pages))
			continue;

		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
					 list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
	}
	mutex_unlock(&buffer->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
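
/*
 * Illustrative sketch (not part of the original file): growing every cpu
 * buffer to roughly 2MB. The requested size is rounded up to a multiple
 * of BUF_PAGE_SIZE, so the value returned on success may be larger than
 * what was asked for.
 *
 *	int ret = ring_buffer_resize(buffer, 2 << 20, RING_BUFFER_ALL_CPUS);
 *	if (ret < 0)
 *		pr_warn("ring buffer resize failed: %d\n", ret);
 */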

void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
{
	mutex_lock(&buffer->mutex);
	if (val)
		buffer->flags |= RB_FL_OVERWRITE;
	else
		buffer->flags &= ~RB_FL_OVERWRITE;
	mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);

static __always_inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static __always_inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static __always_inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

/* Size is determined by what has been committed */
static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static __always_inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static __always_inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = rb_set_head_page(cpu_buffer);
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}
1869
1870 /*
1871 * rb_handle_head_page - writer hit the head page
1872 *
1873 * Returns: +1 to retry page
1874 * 0 to continue
1875 * -1 on error
1876 */
1877 static int
1878 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1879 struct buffer_page *tail_page,
1880 struct buffer_page *next_page)
1881 {
1882 struct buffer_page *new_head;
1883 int entries;
1884 int type;
1885 int ret;
1886
1887 entries = rb_page_entries(next_page);
1888
1889 /*
1890 * The hard part is here. We need to move the head
1891 * forward, and protect against both readers on
1892 * other CPUs and writers coming in via interrupts.
1893 */
1894 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1895 RB_PAGE_HEAD);
1896
1897 /*
1898 * type can be one of four:
1899 * NORMAL - an interrupt already moved it for us
1900 * HEAD - we are the first to get here.
1901 * UPDATE - we are the interrupt interrupting
1902 * a current move.
1903 * MOVED - a reader on another CPU moved the next
1904 * pointer to its reader page. Give up
1905 * and try again.
1906 */
1907
1908 switch (type) {
1909 case RB_PAGE_HEAD:
1910 /*
1911 * We changed the head to UPDATE, thus
1912 * it is our responsibility to update
1913 * the counters.
1914 */
1915 local_add(entries, &cpu_buffer->overrun);
1916 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1917
1918 /*
1919 * The entries will be zeroed out when we move the
1920 * tail page.
1921 */
1922
1923 /* still more to do */
1924 break;
1925
1926 case RB_PAGE_UPDATE:
1927 /*
1928		 * This is an interrupt that interrupted the
1929 * previous update. Still more to do.
1930 */
1931 break;
1932 case RB_PAGE_NORMAL:
1933 /*
1934 * An interrupt came in before the update
1935 * and processed this for us.
1936 * Nothing left to do.
1937 */
1938 return 1;
1939 case RB_PAGE_MOVED:
1940 /*
1941 * The reader is on another CPU and just did
1942 * a swap with our next_page.
1943 * Try again.
1944 */
1945 return 1;
1946 default:
1947 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1948 return -1;
1949 }
1950
1951 /*
1952 * Now that we are here, the old head pointer is
1953 * set to UPDATE. This will keep the reader from
1954 * swapping the head page with the reader page.
1955 * The reader (on another CPU) will spin till
1956 * we are finished.
1957 *
1958 * We just need to protect against interrupts
1959 * doing the job. We will set the next pointer
1960 * to HEAD. After that, we set the old pointer
1961	 * to NORMAL, but only if it was HEAD before;
1962	 * otherwise we are an interrupt, and only
1963	 * want the outermost commit to reset it.
1964 */
1965 new_head = next_page;
1966 rb_inc_page(cpu_buffer, &new_head);
1967
1968 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1969 RB_PAGE_NORMAL);
1970
1971 /*
1972 * Valid returns are:
1973 * HEAD - an interrupt came in and already set it.
1974 * NORMAL - One of two things:
1975 * 1) We really set it.
1976 * 2) A bunch of interrupts came in and moved
1977 * the page forward again.
1978 */
1979 switch (ret) {
1980 case RB_PAGE_HEAD:
1981 case RB_PAGE_NORMAL:
1982 /* OK */
1983 break;
1984 default:
1985 RB_WARN_ON(cpu_buffer, 1);
1986 return -1;
1987 }
1988
1989 /*
1990 * It is possible that an interrupt came in,
1991 * set the head up, then more interrupts came in
1992 * and moved it again. When we get back here,
1993 * the page would have been set to NORMAL but we
1994 * just set it back to HEAD.
1995 *
1996 * How do you detect this? Well, if that happened
1997 * the tail page would have moved.
1998 */
1999 if (ret == RB_PAGE_NORMAL) {
2000 struct buffer_page *buffer_tail_page;
2001
2002 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2003 /*
2004		 * If the tail had moved past next, then we need
2005 * to reset the pointer.
2006 */
2007 if (buffer_tail_page != tail_page &&
2008 buffer_tail_page != next_page)
2009 rb_head_page_set_normal(cpu_buffer, new_head,
2010 next_page,
2011 RB_PAGE_HEAD);
2012 }
2013
2014 /*
2015	 * If this was the outermost commit (the one that
2016 * changed the original pointer from HEAD to UPDATE),
2017 * then it is up to us to reset it to NORMAL.
2018 */
2019 if (type == RB_PAGE_HEAD) {
2020 ret = rb_head_page_set_normal(cpu_buffer, next_page,
2021 tail_page,
2022 RB_PAGE_UPDATE);
2023 if (RB_WARN_ON(cpu_buffer,
2024 ret != RB_PAGE_UPDATE))
2025 return -1;
2026 }
2027
2028 return 0;
2029 }
2030
2031 static inline void
2032 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2033 unsigned long tail, struct rb_event_info *info)
2034 {
2035 struct buffer_page *tail_page = info->tail_page;
2036 struct ring_buffer_event *event;
2037 unsigned long length = info->length;
2038
2039 /*
2040 * Only the event that crossed the page boundary
2041 * must fill the old tail_page with padding.
2042 */
2043 if (tail >= BUF_PAGE_SIZE) {
2044 /*
2045 * If the page was filled, then we still need
2046 * to update the real_end. Reset it to zero
2047 * and the reader will ignore it.
2048 */
2049 if (tail == BUF_PAGE_SIZE)
2050 tail_page->real_end = 0;
2051
2052 local_sub(length, &tail_page->write);
2053 return;
2054 }
2055
2056 event = __rb_page_index(tail_page, tail);
2057
2058 /* account for padding bytes */
2059 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2060
2061 /*
2062	 * Save the original length to the metadata.
2063	 * This will be used by the reader to add the lost event
2064	 * counter.
2065 */
2066 tail_page->real_end = tail;
2067
2068 /*
2069 * If this event is bigger than the minimum size, then
2070 * we need to be careful that we don't subtract the
2071 * write counter enough to allow another writer to slip
2072 * in on this page.
2073 * We put in a discarded commit instead, to make sure
2074 * that this space is not used again.
2075 *
2076 * If we are less than the minimum size, we don't need to
2077 * worry about it.
2078 */
2079 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2080 /* No room for any events */
2081
2082 /* Mark the rest of the page with padding */
2083 rb_event_set_padding(event);
2084
2085 /* Set the write back to the previous setting */
2086 local_sub(length, &tail_page->write);
2087 return;
2088 }
2089
2090 /* Put in a discarded event */
2091 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2092 event->type_len = RINGBUF_TYPE_PADDING;
2093 /* time delta must be non zero */
2094 event->time_delta = 1;
2095
2096 /* Set write to end of buffer */
2097 length = (tail + length) - BUF_PAGE_SIZE;
2098 local_sub(length, &tail_page->write);
2099 }
2100
2101 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2102
2103 /*
2104  * This is the slow path, so force gcc not to inline it.
2105 */
2106 static noinline struct ring_buffer_event *
2107 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2108 unsigned long tail, struct rb_event_info *info)
2109 {
2110 struct buffer_page *tail_page = info->tail_page;
2111 struct buffer_page *commit_page = cpu_buffer->commit_page;
2112 struct ring_buffer *buffer = cpu_buffer->buffer;
2113 struct buffer_page *next_page;
2114 int ret;
2115
2116 next_page = tail_page;
2117
2118 rb_inc_page(cpu_buffer, &next_page);
2119
2120 /*
2121 * If for some reason, we had an interrupt storm that made
2122 * it all the way around the buffer, bail, and warn
2123 * about it.
2124 */
2125 if (unlikely(next_page == commit_page)) {
2126 local_inc(&cpu_buffer->commit_overrun);
2127 goto out_reset;
2128 }
2129
2130 /*
2131 * This is where the fun begins!
2132 *
2133 * We are fighting against races between a reader that
2134 * could be on another CPU trying to swap its reader
2135 * page with the buffer head.
2136 *
2137 * We are also fighting against interrupts coming in and
2138 * moving the head or tail on us as well.
2139 *
2140 * If the next page is the head page then we have filled
2141 * the buffer, unless the commit page is still on the
2142 * reader page.
2143 */
2144 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2145
2146 /*
2147 * If the commit is not on the reader page, then
2148		 * move the head page.
2149 */
2150 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2151 /*
2152 * If we are not in overwrite mode,
2153 * this is easy, just stop here.
2154 */
2155 if (!(buffer->flags & RB_FL_OVERWRITE)) {
2156 local_inc(&cpu_buffer->dropped_events);
2157 goto out_reset;
2158 }
2159
2160 ret = rb_handle_head_page(cpu_buffer,
2161 tail_page,
2162 next_page);
2163 if (ret < 0)
2164 goto out_reset;
2165 if (ret)
2166 goto out_again;
2167 } else {
2168 /*
2169 * We need to be careful here too. The
2170 * commit page could still be on the reader
2171 * page. We could have a small buffer, and
2172 * have filled up the buffer with events
2173 * from interrupts and such, and wrapped.
2174 *
2175			 * Note, if the tail page is also on the
2176 * reader_page, we let it move out.
2177 */
2178 if (unlikely((cpu_buffer->commit_page !=
2179 cpu_buffer->tail_page) &&
2180 (cpu_buffer->commit_page ==
2181 cpu_buffer->reader_page))) {
2182 local_inc(&cpu_buffer->commit_overrun);
2183 goto out_reset;
2184 }
2185 }
2186 }
2187
2188 rb_tail_page_update(cpu_buffer, tail_page, next_page);
2189
2190 out_again:
2191
2192 rb_reset_tail(cpu_buffer, tail, info);
2193
2194 /* Commit what we have for now. */
2195 rb_end_commit(cpu_buffer);
2196 /* rb_end_commit() decs committing */
2197 local_inc(&cpu_buffer->committing);
2198
2199 /* fail and let the caller try again */
2200 return ERR_PTR(-EAGAIN);
2201
2202 out_reset:
2203 /* reset write */
2204 rb_reset_tail(cpu_buffer, tail, info);
2205
2206 return NULL;
2207 }
2208
2209 /* Slow path, do not inline */
2210 static noinline struct ring_buffer_event *
2211 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
2212 {
2213 event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2214
2215 /* Not the first event on the page? */
2216 if (rb_event_index(event)) {
2217 event->time_delta = delta & TS_MASK;
2218 event->array[0] = delta >> TS_SHIFT;
2219 } else {
2220 /* nope, just zero it */
2221 event->time_delta = 0;
2222 event->array[0] = 0;
2223 }
2224
2225 return skip_time_extend(event);
2226 }
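
/*
 * Worked example (illustrative, assuming TS_SHIFT == 27 to match
 * the 27-bit time_delta field): a delta of 1ULL << 30 does not fit
 * in 27 bits, so it is split as
 *
 *	event->time_delta = delta & TS_MASK;	(low 27 bits == 0)
 *	event->array[0]   = delta >> TS_SHIFT;	(high bits   == 8)
 *
 * and the reader reconstructs (8 << TS_SHIFT) + 0 == 1 << 30.
 */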
2227
2228 static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2229 struct ring_buffer_event *event);
2230
2231 /**
2232 * rb_update_event - update event type and data
2233  * @cpu_buffer: the per CPU buffer the event is on
2234  * @event: the event to update
2235  * @info: the event info, holding the length and time delta
2236 *
2237 * Update the type and data fields of the event. The length
2238 * is the actual size that is written to the ring buffer,
2239 * and with this, we can determine what to place into the
2240 * data field.
2241 */
2242 static void
2243 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2244 struct ring_buffer_event *event,
2245 struct rb_event_info *info)
2246 {
2247 unsigned length = info->length;
2248 u64 delta = info->delta;
2249
2250 /* Only a commit updates the timestamp */
2251 if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
2252 delta = 0;
2253
2254 /*
2255 * If we need to add a timestamp, then we
2256	 * add it to the start of the reserved space.
2257 */
2258 if (unlikely(info->add_timestamp)) {
2259 event = rb_add_time_stamp(event, delta);
2260 length -= RB_LEN_TIME_EXTEND;
2261 delta = 0;
2262 }
2263
2264 event->time_delta = delta;
2265 length -= RB_EVNT_HDR_SIZE;
2266 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2267 event->type_len = 0;
2268 event->array[0] = length;
2269 } else
2270 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2271 }
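
/*
 * Worked example (illustrative): on an arch without
 * RB_FORCE_8BYTE_ALIGNMENT, a 20-byte payload is encoded in the
 * type_len field itself:
 *
 *	event->type_len = DIV_ROUND_UP(20, RB_ALIGNMENT);
 *
 * which stores 5, and the length is recovered later as
 * 5 * RB_ALIGNMENT == 20 bytes with no extra length word.
 * Payloads larger than RB_MAX_SMALL_DATA fall back to
 * type_len == 0 with the byte count stored in array[0].
 */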
2272
2273 static unsigned rb_calculate_event_length(unsigned length)
2274 {
2275 struct ring_buffer_event event; /* Used only for sizeof array */
2276
2277 /* zero length can cause confusions */
2278 if (!length)
2279 length++;
2280
2281 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2282 length += sizeof(event.array[0]);
2283
2284 length += RB_EVNT_HDR_SIZE;
2285 length = ALIGN(length, RB_ARCH_ALIGNMENT);
2286
2287 /*
2288 * In case the time delta is larger than the 27 bits for it
2289 * in the header, we need to add a timestamp. If another
2290 * event comes in when trying to discard this one to increase
2291 * the length, then the timestamp will be added in the allocated
2292 * space of this event. If length is bigger than the size needed
2293	 * for the TIME_EXTEND, then padding has to be used. The event's
2294 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2295 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2296 * As length is a multiple of 4, we only need to worry if it
2297 * is 12 (RB_LEN_TIME_EXTEND + 4).
2298 */
2299 if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2300 length += RB_ALIGNMENT;
2301
2302 return length;
2303 }
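
/*
 * Worked example (illustrative, assuming RB_ALIGNMENT == 4, a
 * 4-byte event header, and no forced 8-byte alignment): a request
 * for 7 bytes of data becomes 7 + 4 == 11, aligned up to 12,
 * which is exactly RB_LEN_TIME_EXTEND + 4. A later discard of
 * such an event could not be replaced by a time extend plus
 * padding, since the 4 leftover bytes are below the 8-byte
 * padding minimum, so 4 more bytes are added and 16 is returned.
 */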
2304
2305 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2306 static inline bool sched_clock_stable(void)
2307 {
2308 return true;
2309 }
2310 #endif
2311
2312 static inline int
2313 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2314 struct ring_buffer_event *event)
2315 {
2316 unsigned long new_index, old_index;
2317 struct buffer_page *bpage;
2318 unsigned long index;
2319 unsigned long addr;
2320
2321 new_index = rb_event_index(event);
2322 old_index = new_index + rb_event_ts_length(event);
2323 addr = (unsigned long)event;
2324 addr &= PAGE_MASK;
2325
2326 bpage = READ_ONCE(cpu_buffer->tail_page);
2327
2328 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2329 unsigned long write_mask =
2330 local_read(&bpage->write) & ~RB_WRITE_MASK;
2331 unsigned long event_length = rb_event_length(event);
2332 /*
2333 * This is on the tail page. It is possible that
2334 * a write could come in and move the tail page
2335 * and write to the next page. That is fine
2336 * because we just shorten what is on this page.
2337 */
2338 old_index += write_mask;
2339 new_index += write_mask;
2340 index = local_cmpxchg(&bpage->write, old_index, new_index);
2341 if (index == old_index) {
2342 /* update counters */
2343 local_sub(event_length, &cpu_buffer->entries_bytes);
2344 return 1;
2345 }
2346 }
2347
2348 /* could not discard */
2349 return 0;
2350 }
2351
2352 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2353 {
2354 local_inc(&cpu_buffer->committing);
2355 local_inc(&cpu_buffer->commits);
2356 }
2357
2358 static __always_inline void
2359 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2360 {
2361 unsigned long max_count;
2362
2363 /*
2364 * We only race with interrupts and NMIs on this CPU.
2365 * If we own the commit event, then we can commit
2366 * all others that interrupted us, since the interruptions
2367 * are in stack format (they finish before they come
2368 * back to us). This allows us to do a simple loop to
2369 * assign the commit to the tail.
2370 */
2371 again:
2372 max_count = cpu_buffer->nr_pages * 100;
2373
2374 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
2375 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
2376 return;
2377 if (RB_WARN_ON(cpu_buffer,
2378 rb_is_reader_page(cpu_buffer->tail_page)))
2379 return;
2380 local_set(&cpu_buffer->commit_page->page->commit,
2381 rb_page_write(cpu_buffer->commit_page));
2382 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
2383 /* Only update the write stamp if the page has an event */
2384 if (rb_page_write(cpu_buffer->commit_page))
2385 cpu_buffer->write_stamp =
2386 cpu_buffer->commit_page->page->time_stamp;
2387 /* add barrier to keep gcc from optimizing too much */
2388 barrier();
2389 }
2390 while (rb_commit_index(cpu_buffer) !=
2391 rb_page_write(cpu_buffer->commit_page)) {
2392
2393 local_set(&cpu_buffer->commit_page->page->commit,
2394 rb_page_write(cpu_buffer->commit_page));
2395 RB_WARN_ON(cpu_buffer,
2396 local_read(&cpu_buffer->commit_page->page->commit) &
2397 ~RB_WRITE_MASK);
2398 barrier();
2399 }
2400
2401 /* again, keep gcc from optimizing */
2402 barrier();
2403
2404 /*
2405 * If an interrupt came in just after the first while loop
2406 * and pushed the tail page forward, we will be left with
2407 * a dangling commit that will never go forward.
2408 */
2409 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
2410 goto again;
2411 }
2412
2413 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2414 {
2415 unsigned long commits;
2416
2417 if (RB_WARN_ON(cpu_buffer,
2418 !local_read(&cpu_buffer->committing)))
2419 return;
2420
2421 again:
2422 commits = local_read(&cpu_buffer->commits);
2423 /* synchronize with interrupts */
2424 barrier();
2425 if (local_read(&cpu_buffer->committing) == 1)
2426 rb_set_commit_to_write(cpu_buffer);
2427
2428 local_dec(&cpu_buffer->committing);
2429
2430 /* synchronize with interrupts */
2431 barrier();
2432
2433 /*
2434 * Need to account for interrupts coming in between the
2435 * updating of the commit page and the clearing of the
2436 * committing counter.
2437 */
2438 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2439 !local_read(&cpu_buffer->committing)) {
2440 local_inc(&cpu_buffer->committing);
2441 goto again;
2442 }
2443 }
2444
2445 static inline void rb_event_discard(struct ring_buffer_event *event)
2446 {
2447 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2448 event = skip_time_extend(event);
2449
2450 /* array[0] holds the actual length for the discarded event */
2451 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2452 event->type_len = RINGBUF_TYPE_PADDING;
2453 /* time delta must be non zero */
2454 if (!event->time_delta)
2455 event->time_delta = 1;
2456 }
2457
2458 static __always_inline bool
2459 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2460 struct ring_buffer_event *event)
2461 {
2462 unsigned long addr = (unsigned long)event;
2463 unsigned long index;
2464
2465 index = rb_event_index(event);
2466 addr &= PAGE_MASK;
2467
2468 return cpu_buffer->commit_page->page == (void *)addr &&
2469 rb_commit_index(cpu_buffer) == index;
2470 }
2471
2472 static __always_inline void
2473 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2474 struct ring_buffer_event *event)
2475 {
2476 u64 delta;
2477
2478 /*
2479	 * The first event in the commit queue updates the
2480 * time stamp.
2481 */
2482 if (rb_event_is_commit(cpu_buffer, event)) {
2483 /*
2484 * A commit event that is first on a page
2485 * updates the write timestamp with the page stamp
2486 */
2487 if (!rb_event_index(event))
2488 cpu_buffer->write_stamp =
2489 cpu_buffer->commit_page->page->time_stamp;
2490 else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2491 delta = event->array[0];
2492 delta <<= TS_SHIFT;
2493 delta += event->time_delta;
2494 cpu_buffer->write_stamp += delta;
2495 } else
2496 cpu_buffer->write_stamp += event->time_delta;
2497 }
2498 }
2499
2500 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2501 struct ring_buffer_event *event)
2502 {
2503 local_inc(&cpu_buffer->entries);
2504 rb_update_write_stamp(cpu_buffer, event);
2505 rb_end_commit(cpu_buffer);
2506 }
2507
2508 static __always_inline void
2509 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2510 {
2511 bool pagebusy;
2512
2513 if (buffer->irq_work.waiters_pending) {
2514 buffer->irq_work.waiters_pending = false;
2515		/* irq_work_queue() supplies its own memory barriers */
2516 irq_work_queue(&buffer->irq_work.work);
2517 }
2518
2519 if (cpu_buffer->irq_work.waiters_pending) {
2520 cpu_buffer->irq_work.waiters_pending = false;
2521		/* irq_work_queue() supplies its own memory barriers */
2522 irq_work_queue(&cpu_buffer->irq_work.work);
2523 }
2524
2525 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
2526
2527 if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
2528 cpu_buffer->irq_work.wakeup_full = true;
2529 cpu_buffer->irq_work.full_waiters_pending = false;
2530		/* irq_work_queue() supplies its own memory barriers */
2531 irq_work_queue(&cpu_buffer->irq_work.work);
2532 }
2533 }
2534
2535 /*
2536 * The lock and unlock are done within a preempt disable section.
2537 * The current_context per_cpu variable can only be modified
2538 * by the current task between lock and unlock. But it can
2539 * be modified more than once via an interrupt. There are four
2540 * different contexts that we need to consider.
2541 *
2542  *  Normal context
2543 * SoftIRQ context
2544 * IRQ context
2545 * NMI context
2546 *
2547 * If for some reason the ring buffer starts to recurse, we
2548 * only allow that to happen at most 4 times (one for each
2549 * context). If it happens 5 times, then we consider this a
2550  * recursive loop and do not let it go further.
2551 */
2552
2553 static __always_inline int
2554 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
2555 {
2556 if (cpu_buffer->current_context >= 4)
2557 return 1;
2558
2559 cpu_buffer->current_context++;
2560 /* Interrupts must see this update */
2561 barrier();
2562
2563 return 0;
2564 }
2565
2566 static __always_inline void
2567 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
2568 {
2569 /* Don't let the dec leak out */
2570 barrier();
2571 cpu_buffer->current_context--;
2572 }
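
/*
 * Illustrative sketch (not from this file): a writer in normal
 * context raises current_context from 0 to 1. A softirq that
 * traces inside that window raises it to 2, an IRQ on top of the
 * softirq to 3, and an NMI to 4. Each context drops the count
 * again on its way out via trace_recursive_unlock(). A fifth
 * nested attempt would mean the ring buffer code recursed within
 * a single context, so trace_recursive_lock() rejects it.
 */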
2573
2574 /**
2575  * ring_buffer_unlock_commit - commit a reserved event
2576 * @buffer: The buffer to commit to
2577 * @event: The event pointer to commit.
2578 *
2579 * This commits the data to the ring buffer, and releases any locks held.
2580 *
2581 * Must be paired with ring_buffer_lock_reserve.
2582 */
2583 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2584 struct ring_buffer_event *event)
2585 {
2586 struct ring_buffer_per_cpu *cpu_buffer;
2587 int cpu = raw_smp_processor_id();
2588
2589 cpu_buffer = buffer->buffers[cpu];
2590
2591 rb_commit(cpu_buffer, event);
2592
2593 rb_wakeups(buffer, cpu_buffer);
2594
2595 trace_recursive_unlock(cpu_buffer);
2596
2597 preempt_enable_notrace();
2598
2599 return 0;
2600 }
2601 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2602
2603 static noinline void
2604 rb_handle_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2605 struct rb_event_info *info)
2606 {
2607 WARN_ONCE(info->delta > (1ULL << 59),
2608 KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2609 (unsigned long long)info->delta,
2610 (unsigned long long)info->ts,
2611 (unsigned long long)cpu_buffer->write_stamp,
2612 sched_clock_stable() ? "" :
2613 "If you just came from a suspend/resume,\n"
2614 "please switch to the trace global clock:\n"
2615 " echo global > /sys/kernel/debug/tracing/trace_clock\n");
2616 info->add_timestamp = 1;
2617 }
2618
2619 static struct ring_buffer_event *
2620 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2621 struct rb_event_info *info)
2622 {
2623 struct ring_buffer_event *event;
2624 struct buffer_page *tail_page;
2625 unsigned long tail, write;
2626
2627 /*
2628 * If the time delta since the last event is too big to
2629 * hold in the time field of the event, then we append a
2630 * TIME EXTEND event ahead of the data event.
2631 */
2632 if (unlikely(info->add_timestamp))
2633 info->length += RB_LEN_TIME_EXTEND;
2634
2635 /* Don't let the compiler play games with cpu_buffer->tail_page */
2636 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
2637 write = local_add_return(info->length, &tail_page->write);
2638
2639 /* set write to only the index of the write */
2640 write &= RB_WRITE_MASK;
2641 tail = write - info->length;
2642
2643 /*
2644 * If this is the first commit on the page, then it has the same
2645 * timestamp as the page itself.
2646 */
2647 if (!tail)
2648 info->delta = 0;
2649
2650	/* See if we shot past the end of this buffer page */
2651 if (unlikely(write > BUF_PAGE_SIZE))
2652 return rb_move_tail(cpu_buffer, tail, info);
2653
2654 /* We reserved something on the buffer */
2655
2656 event = __rb_page_index(tail_page, tail);
2657 rb_update_event(cpu_buffer, event, info);
2658
2659 local_inc(&tail_page->entries);
2660
2661 /*
2662 * If this is the first commit on the page, then update
2663 * its timestamp.
2664 */
2665 if (!tail)
2666 tail_page->page->time_stamp = info->ts;
2667
2668 /* account for these added bytes */
2669 local_add(info->length, &cpu_buffer->entries_bytes);
2670
2671 return event;
2672 }
2673
2674 static __always_inline struct ring_buffer_event *
2675 rb_reserve_next_event(struct ring_buffer *buffer,
2676 struct ring_buffer_per_cpu *cpu_buffer,
2677 unsigned long length)
2678 {
2679 struct ring_buffer_event *event;
2680 struct rb_event_info info;
2681 int nr_loops = 0;
2682 u64 diff;
2683
2684 rb_start_commit(cpu_buffer);
2685
2686 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2687 /*
2688	 * Due to the ability to swap a cpu buffer out of a buffer,
2689 * it is possible it was swapped before we committed.
2690 * (committing stops a swap). We check for it here and
2691 * if it happened, we have to fail the write.
2692 */
2693 barrier();
2694 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
2695 local_dec(&cpu_buffer->committing);
2696 local_dec(&cpu_buffer->commits);
2697 return NULL;
2698 }
2699 #endif
2700
2701 info.length = rb_calculate_event_length(length);
2702 again:
2703 info.add_timestamp = 0;
2704 info.delta = 0;
2705
2706 /*
2707 * We allow for interrupts to reenter here and do a trace.
2708 * If one does, it will cause this original code to loop
2709 * back here. Even with heavy interrupts happening, this
2710 * should only happen a few times in a row. If this happens
2711 * 1000 times in a row, there must be either an interrupt
2712	 * storm or something buggy.
2713 * Bail!
2714 */
2715 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2716 goto out_fail;
2717
2718 info.ts = rb_time_stamp(cpu_buffer->buffer);
2719 diff = info.ts - cpu_buffer->write_stamp;
2720
2721 /* make sure this diff is calculated here */
2722 barrier();
2723
2724 /* Did the write stamp get updated already? */
2725 if (likely(info.ts >= cpu_buffer->write_stamp)) {
2726 info.delta = diff;
2727 if (unlikely(test_time_stamp(info.delta)))
2728 rb_handle_timestamp(cpu_buffer, &info);
2729 }
2730
2731 event = __rb_reserve_next(cpu_buffer, &info);
2732
2733 if (unlikely(PTR_ERR(event) == -EAGAIN)) {
2734 if (info.add_timestamp)
2735 info.length -= RB_LEN_TIME_EXTEND;
2736 goto again;
2737 }
2738
2739 if (!event)
2740 goto out_fail;
2741
2742 return event;
2743
2744 out_fail:
2745 rb_end_commit(cpu_buffer);
2746 return NULL;
2747 }
2748
2749 /**
2750 * ring_buffer_lock_reserve - reserve a part of the buffer
2751 * @buffer: the ring buffer to reserve from
2752 * @length: the length of the data to reserve (excluding event header)
2753 *
2754  * Returns a reserved event on the ring buffer to copy directly to.
2755 * The user of this interface will need to get the body to write into
2756 * and can use the ring_buffer_event_data() interface.
2757 *
2758 * The length is the length of the data needed, not the event length
2759 * which also includes the event header.
2760 *
2761 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2762 * If NULL is returned, then nothing has been allocated or locked.
2763 */
2764 struct ring_buffer_event *
2765 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2766 {
2767 struct ring_buffer_per_cpu *cpu_buffer;
2768 struct ring_buffer_event *event;
2769 int cpu;
2770
2771 /* If we are tracing schedule, we don't want to recurse */
2772 preempt_disable_notrace();
2773
2774 if (unlikely(atomic_read(&buffer->record_disabled)))
2775 goto out;
2776
2777 cpu = raw_smp_processor_id();
2778
2779 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
2780 goto out;
2781
2782 cpu_buffer = buffer->buffers[cpu];
2783
2784 if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
2785 goto out;
2786
2787 if (unlikely(length > BUF_MAX_DATA_SIZE))
2788 goto out;
2789
2790 if (unlikely(trace_recursive_lock(cpu_buffer)))
2791 goto out;
2792
2793 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2794 if (!event)
2795 goto out_unlock;
2796
2797 return event;
2798
2799 out_unlock:
2800 trace_recursive_unlock(cpu_buffer);
2801 out:
2802 preempt_enable_notrace();
2803 return NULL;
2804 }
2805 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
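
/*
 * Usage sketch (illustrative only; struct my_payload is a made-up
 * type, not an interface of this file). NULL from the reserve
 * means recording is disabled, the CPU is not in the cpumask, the
 * length exceeds BUF_MAX_DATA_SIZE, or the recursion check failed:
 *
 *	struct ring_buffer_event *event;
 *	struct my_payload *p;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*p));
 *	if (!event)
 *		return;
 *	p = ring_buffer_event_data(event);
 *	p->value = 42;
 *	ring_buffer_unlock_commit(buffer, event);
 *
 * Preemption is held off between reserve and commit, so the pair
 * runs on one CPU and the critical section should stay short.
 */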
2806
2807 /*
2808 * Decrement the entries to the page that an event is on.
2809 * The event does not even need to exist, only the pointer
2810 * to the page it is on. This may only be called before the commit
2811 * takes place.
2812 */
2813 static inline void
2814 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2815 struct ring_buffer_event *event)
2816 {
2817 unsigned long addr = (unsigned long)event;
2818 struct buffer_page *bpage = cpu_buffer->commit_page;
2819 struct buffer_page *start;
2820
2821 addr &= PAGE_MASK;
2822
2823 /* Do the likely case first */
2824 if (likely(bpage->page == (void *)addr)) {
2825 local_dec(&bpage->entries);
2826 return;
2827 }
2828
2829 /*
2830	 * Because the commit page may be on the reader page, we
2831	 * start with the next page and loop until we come back to it.
2832 */
2833 rb_inc_page(cpu_buffer, &bpage);
2834 start = bpage;
2835 do {
2836 if (bpage->page == (void *)addr) {
2837 local_dec(&bpage->entries);
2838 return;
2839 }
2840 rb_inc_page(cpu_buffer, &bpage);
2841 } while (bpage != start);
2842
2843 /* commit not part of this buffer?? */
2844 RB_WARN_ON(cpu_buffer, 1);
2845 }
2846
2847 /**
2848 * ring_buffer_commit_discard - discard an event that has not been committed
2849 * @buffer: the ring buffer
2850 * @event: non committed event to discard
2851 *
2852 * Sometimes an event that is in the ring buffer needs to be ignored.
2853 * This function lets the user discard an event in the ring buffer
2854 * and then that event will not be read later.
2855 *
2856  * This function only works if it is called before the item has been
2857 * committed. It will try to free the event from the ring buffer
2858 * if another event has not been added behind it.
2859 *
2860 * If another event has been added behind it, it will set the event
2861 * up as discarded, and perform the commit.
2862 *
2863 * If this function is called, do not call ring_buffer_unlock_commit on
2864 * the event.
2865 */
2866 void ring_buffer_discard_commit(struct ring_buffer *buffer,
2867 struct ring_buffer_event *event)
2868 {
2869 struct ring_buffer_per_cpu *cpu_buffer;
2870 int cpu;
2871
2872 /* The event is discarded regardless */
2873 rb_event_discard(event);
2874
2875 cpu = smp_processor_id();
2876 cpu_buffer = buffer->buffers[cpu];
2877
2878 /*
2879 * This must only be called if the event has not been
2880 * committed yet. Thus we can assume that preemption
2881 * is still disabled.
2882 */
2883 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2884
2885 rb_decrement_entry(cpu_buffer, event);
2886 if (rb_try_to_discard(cpu_buffer, event))
2887 goto out;
2888
2889 /*
2890	 * The commit is still visible to the reader, so we
2891 * must still update the timestamp.
2892 */
2893 rb_update_write_stamp(cpu_buffer, event);
2894 out:
2895 rb_end_commit(cpu_buffer);
2896
2897 trace_recursive_unlock(cpu_buffer);
2898
2899 preempt_enable_notrace();
2900
2901 }
2902 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
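
/*
 * Usage sketch (illustrative): a caller that reserves space but
 * then decides against recording the event replaces the commit
 * with a discard. should_drop() stands in for caller policy and
 * is not an interface of this file:
 *
 *	event = ring_buffer_lock_reserve(buffer, size);
 *	if (event) {
 *		if (should_drop(ring_buffer_event_data(event)))
 *			ring_buffer_discard_commit(buffer, event);
 *		else
 *			ring_buffer_unlock_commit(buffer, event);
 *	}
 *
 * Exactly one of the two calls must follow every successful
 * reserve.
 */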
2903
2904 /**
2905 * ring_buffer_write - write data to the buffer without reserving
2906 * @buffer: The ring buffer to write to.
2907 * @length: The length of the data being written (excluding the event header)
2908 * @data: The data to write to the buffer.
2909 *
2910 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2911 * one function. If you already have the data to write to the buffer, it
2912 * may be easier to simply call this function.
2913 *
2914 * Note, like ring_buffer_lock_reserve, the length is the length of the data
2915 * and not the length of the event which would hold the header.
2916 */
2917 int ring_buffer_write(struct ring_buffer *buffer,
2918 unsigned long length,
2919 void *data)
2920 {
2921 struct ring_buffer_per_cpu *cpu_buffer;
2922 struct ring_buffer_event *event;
2923 void *body;
2924 int ret = -EBUSY;
2925 int cpu;
2926
2927 preempt_disable_notrace();
2928
2929 if (atomic_read(&buffer->record_disabled))
2930 goto out;
2931
2932 cpu = raw_smp_processor_id();
2933
2934 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2935 goto out;
2936
2937 cpu_buffer = buffer->buffers[cpu];
2938
2939 if (atomic_read(&cpu_buffer->record_disabled))
2940 goto out;
2941
2942 if (length > BUF_MAX_DATA_SIZE)
2943 goto out;
2944
2945 if (unlikely(trace_recursive_lock(cpu_buffer)))
2946 goto out;
2947
2948 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2949 if (!event)
2950 goto out_unlock;
2951
2952 body = rb_event_data(event);
2953
2954 memcpy(body, data, length);
2955
2956 rb_commit(cpu_buffer, event);
2957
2958 rb_wakeups(buffer, cpu_buffer);
2959
2960 ret = 0;
2961
2962 out_unlock:
2963 trace_recursive_unlock(cpu_buffer);
2964
2965 out:
2966 preempt_enable_notrace();
2967
2968 return ret;
2969 }
2970 EXPORT_SYMBOL_GPL(ring_buffer_write);
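
/*
 * Usage sketch (illustrative; struct my_record is a made-up type):
 * when the payload already exists, the reserve/commit pair can be
 * collapsed into one call:
 *
 *	struct my_record rec = { .value = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(rec), &rec))
 *		pr_debug("ring buffer write failed\n");
 *
 * A non-zero return (-EBUSY) means recording was disabled, the
 * length exceeded BUF_MAX_DATA_SIZE, or the reserve failed.
 */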
2971
2972 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
2973 {
2974 struct buffer_page *reader = cpu_buffer->reader_page;
2975 struct buffer_page *head = rb_set_head_page(cpu_buffer);
2976 struct buffer_page *commit = cpu_buffer->commit_page;
2977
2978 /* In case of error, head will be NULL */
2979 if (unlikely(!head))
2980 return true;
2981
2982 return reader->read == rb_page_commit(reader) &&
2983 (commit == reader ||
2984 (commit == head &&
2985 head->read == rb_page_commit(commit)));
2986 }
2987
2988 /**
2989 * ring_buffer_record_disable - stop all writes into the buffer
2990 * @buffer: The ring buffer to stop writes to.
2991 *
2992 * This prevents all writes to the buffer. Any attempt to write
2993 * to the buffer after this will fail and return NULL.
2994 *
2995 * The caller should call synchronize_sched() after this.
2996 */
2997 void ring_buffer_record_disable(struct ring_buffer *buffer)
2998 {
2999 atomic_inc(&buffer->record_disabled);
3000 }
3001 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3002
3003 /**
3004 * ring_buffer_record_enable - enable writes to the buffer
3005 * @buffer: The ring buffer to enable writes
3006 *
3007 * Note, multiple disables will need the same number of enables
3008 * to truly enable the writing (much like preempt_disable).
3009 */
3010 void ring_buffer_record_enable(struct ring_buffer *buffer)
3011 {
3012 atomic_dec(&buffer->record_disabled);
3013 }
3014 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3015
3016 /**
3017 * ring_buffer_record_off - stop all writes into the buffer
3018 * @buffer: The ring buffer to stop writes to.
3019 *
3020 * This prevents all writes to the buffer. Any attempt to write
3021 * to the buffer after this will fail and return NULL.
3022 *
3023  * This is different from ring_buffer_record_disable() as
3024  * it works like an on/off switch, whereas the disable() version
3025  * must be paired with an enable().
3026 */
3027 void ring_buffer_record_off(struct ring_buffer *buffer)
3028 {
3029 unsigned int rd;
3030 unsigned int new_rd;
3031
3032 do {
3033 rd = atomic_read(&buffer->record_disabled);
3034 new_rd = rd | RB_BUFFER_OFF;
3035 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3036 }
3037 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3038
3039 /**
3040 * ring_buffer_record_on - restart writes into the buffer
3041 * @buffer: The ring buffer to start writes to.
3042 *
3043 * This enables all writes to the buffer that was disabled by
3044 * ring_buffer_record_off().
3045 *
3046  * This is different from ring_buffer_record_enable() as
3047  * it works like an on/off switch, whereas the enable() version
3048  * must be paired with a disable().
3049 */
3050 void ring_buffer_record_on(struct ring_buffer *buffer)
3051 {
3052 unsigned int rd;
3053 unsigned int new_rd;
3054
3055 do {
3056 rd = atomic_read(&buffer->record_disabled);
3057 new_rd = rd & ~RB_BUFFER_OFF;
3058 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3059 }
3060 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
3061
3062 /**
3063 * ring_buffer_record_is_on - return true if the ring buffer can write
3064 * @buffer: The ring buffer to see if write is enabled
3065 *
3066 * Returns true if the ring buffer is in a state that it accepts writes.
3067 */
3068 int ring_buffer_record_is_on(struct ring_buffer *buffer)
3069 {
3070 return !atomic_read(&buffer->record_disabled);
3071 }
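
/*
 * Illustrative sketch (not from this file) of how the two disable
 * mechanisms compose. disable()/enable() nest like a counter,
 * while off()/on() toggle the single RB_BUFFER_OFF bit:
 *
 *	ring_buffer_record_disable(buffer);
 *	ring_buffer_record_disable(buffer);
 *	ring_buffer_record_enable(buffer);
 *	(writes still fail: the count is 1, not 0)
 *	ring_buffer_record_enable(buffer);
 *	(writes succeed again)
 *
 *	ring_buffer_record_off(buffer);
 *	ring_buffer_record_off(buffer);
 *	ring_buffer_record_on(buffer);
 *	(writes succeed again: on/off is a switch, not a count)
 */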
3072
3073 /**
3074 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3075 * @buffer: The ring buffer to stop writes to.
3076 * @cpu: The CPU buffer to stop
3077 *
3078 * This prevents all writes to the buffer. Any attempt to write
3079 * to the buffer after this will fail and return NULL.
3080 *
3081 * The caller should call synchronize_sched() after this.
3082 */
3083 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
3084 {
3085 struct ring_buffer_per_cpu *cpu_buffer;
3086
3087 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3088 return;
3089
3090 cpu_buffer = buffer->buffers[cpu];
3091 atomic_inc(&cpu_buffer->record_disabled);
3092 }
3093 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
3094
3095 /**
3096 * ring_buffer_record_enable_cpu - enable writes to the buffer
3097 * @buffer: The ring buffer to enable writes
3098 * @cpu: The CPU to enable.
3099 *
3100 * Note, multiple disables will need the same number of enables
3101 * to truly enable the writing (much like preempt_disable).
3102 */
3103 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
3104 {
3105 struct ring_buffer_per_cpu *cpu_buffer;
3106
3107 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3108 return;
3109
3110 cpu_buffer = buffer->buffers[cpu];
3111 atomic_dec(&cpu_buffer->record_disabled);
3112 }
3113 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
3114
3115 /*
3116 * The total entries in the ring buffer is the running counter
3117 * of entries entered into the ring buffer, minus the sum of
3118 * the entries read from the ring buffer and the number of
3119 * entries that were overwritten.
3120 */
3121 static inline unsigned long
3122 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
3123 {
3124 return local_read(&cpu_buffer->entries) -
3125 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
3126 }
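
/*
 * Worked example (illustrative): after 100 events are written,
 * 10 are overwritten by the wrapping writer and 30 are consumed
 * by a reader, entries == 100, overrun == 10, read == 30, so
 * rb_num_of_entries() reports 100 - (10 + 30) == 60 events still
 * readable in the buffer.
 */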
3127
3128 /**
3129 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
3130 * @buffer: The ring buffer
3131 * @cpu: The per CPU buffer to read from.
3132 */
3133 u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
3134 {
3135 unsigned long flags;
3136 struct ring_buffer_per_cpu *cpu_buffer;
3137 struct buffer_page *bpage;
3138 u64 ret = 0;
3139
3140 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3141 return 0;
3142
3143 cpu_buffer = buffer->buffers[cpu];
3144 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3145 /*
3146	 * If the tail is on the reader_page, the oldest time stamp
3147	 * is on the reader page.
3148 */
3149 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
3150 bpage = cpu_buffer->reader_page;
3151 else
3152 bpage = rb_set_head_page(cpu_buffer);
3153 if (bpage)
3154 ret = bpage->page->time_stamp;
3155 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3156
3157 return ret;
3158 }
3159 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3160
3161 /**
3162 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3163 * @buffer: The ring buffer
3164 * @cpu: The per CPU buffer to read from.
3165 */
3166 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3167 {
3168 struct ring_buffer_per_cpu *cpu_buffer;
3169 unsigned long ret;
3170
3171 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3172 return 0;
3173
3174 cpu_buffer = buffer->buffers[cpu];
3175 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3176
3177 return ret;
3178 }
3179 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3180
3181 /**
3182 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
3183 * @buffer: The ring buffer
3184 * @cpu: The per CPU buffer to get the entries from.
3185 */
3186 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
3187 {
3188 struct ring_buffer_per_cpu *cpu_buffer;
3189
3190 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3191 return 0;
3192
3193 cpu_buffer = buffer->buffers[cpu];
3194
3195 return rb_num_of_entries(cpu_buffer);
3196 }
3197 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3198
3199 /**
3200 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3201 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3202 * @buffer: The ring buffer
3203 * @cpu: The per CPU buffer to get the number of overruns from
3204 */
3205 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3206 {
3207 struct ring_buffer_per_cpu *cpu_buffer;
3208 unsigned long ret;
3209
3210 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3211 return 0;
3212
3213 cpu_buffer = buffer->buffers[cpu];
3214 ret = local_read(&cpu_buffer->overrun);
3215
3216 return ret;
3217 }
3218 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3219
3220 /**
3221 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3222 * commits failing due to the buffer wrapping around while there are uncommitted
3223 * events, such as during an interrupt storm.
3224 * @buffer: The ring buffer
3225 * @cpu: The per CPU buffer to get the number of overruns from
3226 */
3227 unsigned long
3228 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3229 {
3230 struct ring_buffer_per_cpu *cpu_buffer;
3231 unsigned long ret;
3232
3233 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3234 return 0;
3235
3236 cpu_buffer = buffer->buffers[cpu];
3237 ret = local_read(&cpu_buffer->commit_overrun);
3238
3239 return ret;
3240 }
3241 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3242
3243 /**
3244 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3245 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3246 * @buffer: The ring buffer
3247 * @cpu: The per CPU buffer to get the number of overruns from
3248 */
3249 unsigned long
3250 ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3251 {
3252 struct ring_buffer_per_cpu *cpu_buffer;
3253 unsigned long ret;
3254
3255 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3256 return 0;
3257
3258 cpu_buffer = buffer->buffers[cpu];
3259 ret = local_read(&cpu_buffer->dropped_events);
3260
3261 return ret;
3262 }
3263 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3264
3265 /**
3266 * ring_buffer_read_events_cpu - get the number of events successfully read
3267 * @buffer: The ring buffer
3268 * @cpu: The per CPU buffer to get the number of events read
3269 */
3270 unsigned long
3271 ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3272 {
3273 struct ring_buffer_per_cpu *cpu_buffer;
3274
3275 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3276 return 0;
3277
3278 cpu_buffer = buffer->buffers[cpu];
3279 return cpu_buffer->read;
3280 }
3281 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3282
3283 /**
3284 * ring_buffer_entries - get the number of entries in a buffer
3285 * @buffer: The ring buffer
3286 *
3287 * Returns the total number of entries in the ring buffer
3288 * (all CPU entries)
3289 */
3290 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3291 {
3292 struct ring_buffer_per_cpu *cpu_buffer;
3293 unsigned long entries = 0;
3294 int cpu;
3295
3296 /* if you care about this being correct, lock the buffer */
3297 for_each_buffer_cpu(buffer, cpu) {
3298 cpu_buffer = buffer->buffers[cpu];
3299 entries += rb_num_of_entries(cpu_buffer);
3300 }
3301
3302 return entries;
3303 }
3304 EXPORT_SYMBOL_GPL(ring_buffer_entries);
3305
3306 /**
3307 * ring_buffer_overruns - get the number of overruns in buffer
3308 * @buffer: The ring buffer
3309 *
3310 * Returns the total number of overruns in the ring buffer
3311 * (all CPU entries)
3312 */
3313 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3314 {
3315 struct ring_buffer_per_cpu *cpu_buffer;
3316 unsigned long overruns = 0;
3317 int cpu;
3318
3319 /* if you care about this being correct, lock the buffer */
3320 for_each_buffer_cpu(buffer, cpu) {
3321 cpu_buffer = buffer->buffers[cpu];
3322 overruns += local_read(&cpu_buffer->overrun);
3323 }
3324
3325 return overruns;
3326 }
3327 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
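
/*
 * Usage sketch (illustrative): a monitor sampling buffer health
 * without locking, accepting the documented raciness:
 *
 *	int cpu;
 *
 *	for_each_buffer_cpu(buffer, cpu)
 *		pr_info("cpu %d: %lu entries, %lu overruns\n", cpu,
 *			ring_buffer_entries_cpu(buffer, cpu),
 *			ring_buffer_overrun_cpu(buffer, cpu));
 *
 * The totals from ring_buffer_entries() and ring_buffer_overruns()
 * are only exact if the buffer is quiescent while they are read.
 */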
3328
3329 static void rb_iter_reset(struct ring_buffer_iter *iter)
3330 {
3331 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3332
3333 /* Iterator usage is expected to have record disabled */
3334 iter->head_page = cpu_buffer->reader_page;
3335 iter->head = cpu_buffer->reader_page->read;
3336
3337 iter->cache_reader_page = iter->head_page;
3338 iter->cache_read = cpu_buffer->read;
3339
3340 if (iter->head)
3341 iter->read_stamp = cpu_buffer->read_stamp;
3342 else
3343 iter->read_stamp = iter->head_page->page->time_stamp;
3344 }
3345
3346 /**
3347 * ring_buffer_iter_reset - reset an iterator
3348 * @iter: The iterator to reset
3349 *
3350 * Resets the iterator, so that it will start from the beginning
3351 * again.
3352 */
3353 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3354 {
3355 struct ring_buffer_per_cpu *cpu_buffer;
3356 unsigned long flags;
3357
3358 if (!iter)
3359 return;
3360
3361 cpu_buffer = iter->cpu_buffer;
3362
3363 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3364 rb_iter_reset(iter);
3365 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3366 }
3367 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3368
3369 /**
3370 * ring_buffer_iter_empty - check if an iterator has no more to read
3371 * @iter: The iterator to check
3372 */
3373 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3374 {
3375 struct ring_buffer_per_cpu *cpu_buffer;
3376 struct buffer_page *reader;
3377 struct buffer_page *head_page;
3378 struct buffer_page *commit_page;
3379 unsigned commit;
3380
3381 cpu_buffer = iter->cpu_buffer;
3382
3383	/* Remember, trace recording is off when an iterator is in use */
3384 reader = cpu_buffer->reader_page;
3385 head_page = cpu_buffer->head_page;
3386 commit_page = cpu_buffer->commit_page;
3387 commit = rb_page_commit(commit_page);
3388
3389 return ((iter->head_page == commit_page && iter->head == commit) ||
3390 (iter->head_page == reader && commit_page == head_page &&
3391 head_page->read == commit &&
3392 iter->head == rb_page_commit(cpu_buffer->reader_page)));
3393 }
3394 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3395
3396 static void
3397 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3398 struct ring_buffer_event *event)
3399 {
3400 u64 delta;
3401
3402 switch (event->type_len) {
3403 case RINGBUF_TYPE_PADDING:
3404 return;
3405
3406 case RINGBUF_TYPE_TIME_EXTEND:
3407 delta = event->array[0];
3408 delta <<= TS_SHIFT;
3409 delta += event->time_delta;
3410 cpu_buffer->read_stamp += delta;
3411 return;
3412
3413 case RINGBUF_TYPE_TIME_STAMP:
3414 /* FIXME: not implemented */
3415 return;
3416
3417 case RINGBUF_TYPE_DATA:
3418 cpu_buffer->read_stamp += event->time_delta;
3419 return;
3420
3421 default:
3422 BUG();
3423 }
3424 return;
3425 }
3426
3427 static void
3428 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3429 struct ring_buffer_event *event)
3430 {
3431 u64 delta;
3432
3433 switch (event->type_len) {
3434 case RINGBUF_TYPE_PADDING:
3435 return;
3436
3437 case RINGBUF_TYPE_TIME_EXTEND:
3438 delta = event->array[0];
3439 delta <<= TS_SHIFT;
3440 delta += event->time_delta;
3441 iter->read_stamp += delta;
3442 return;
3443
3444 case RINGBUF_TYPE_TIME_STAMP:
3445 /* FIXME: not implemented */
3446 return;
3447
3448 case RINGBUF_TYPE_DATA:
3449 iter->read_stamp += event->time_delta;
3450 return;
3451
3452 default:
3453 BUG();
3454 }
3455 return;
3456 }
3457
3458 static struct buffer_page *
3459 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3460 {
3461 struct buffer_page *reader = NULL;
3462 unsigned long overwrite;
3463 unsigned long flags;
3464 int nr_loops = 0;
3465 int ret;
3466
3467 local_irq_save(flags);
3468 arch_spin_lock(&cpu_buffer->lock);
3469
3470 again:
3471 /*
3472 * This should normally only loop twice. But because the
3473 * start of the reader inserts an empty page, it causes
3474 * a case where we will loop three times. There should be no
3475 * reason to loop four times (that I know of).
3476 */
3477 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3478 reader = NULL;
3479 goto out;
3480 }
3481
3482 reader = cpu_buffer->reader_page;
3483
3484 /* If there's more to read, return this page */
3485 if (cpu_buffer->reader_page->read < rb_page_size(reader))
3486 goto out;
3487
3488 /* Never should we have an index greater than the size */
3489 if (RB_WARN_ON(cpu_buffer,
3490 cpu_buffer->reader_page->read > rb_page_size(reader)))
3491 goto out;
3492
3493 /* check if we caught up to the tail */
3494 reader = NULL;
3495 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3496 goto out;
3497
3498 /* Don't bother swapping if the ring buffer is empty */
3499 if (rb_num_of_entries(cpu_buffer) == 0)
3500 goto out;
3501
3502 /*
3503 * Reset the reader page to size zero.
3504 */
3505 local_set(&cpu_buffer->reader_page->write, 0);
3506 local_set(&cpu_buffer->reader_page->entries, 0);
3507 local_set(&cpu_buffer->reader_page->page->commit, 0);
3508 cpu_buffer->reader_page->real_end = 0;
3509
3510 spin:
3511 /*
3512 * Splice the empty reader page into the list around the head.
3513 */
3514 reader = rb_set_head_page(cpu_buffer);
3515 if (!reader)
3516 goto out;
3517 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3518 cpu_buffer->reader_page->list.prev = reader->list.prev;
3519
3520 /*
3521 * cpu_buffer->pages just needs to point to the buffer, it
3522	 * has no specific buffer page to point to. Let's move it out
3523 * of our way so we don't accidentally swap it.
3524 */
3525 cpu_buffer->pages = reader->list.prev;
3526
3527 /* The reader page will be pointing to the new head */
3528 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3529
3530 /*
3531 * We want to make sure we read the overruns after we set up our
3532 * pointers to the next object. The writer side does a
3533 * cmpxchg to cross pages which acts as the mb on the writer
3534 * side. Note, the reader will constantly fail the swap
3535 * while the writer is updating the pointers, so this
3536 * guarantees that the overwrite recorded here is the one we
3537 * want to compare with the last_overrun.
3538 */
3539 smp_mb();
3540 overwrite = local_read(&(cpu_buffer->overrun));
3541
3542 /*
3543 * Here's the tricky part.
3544 *
3545 * We need to move the pointer past the header page.
3546 * But we can only do that if a writer is not currently
3547 * moving it. The page before the header page has the
3548	 * flag bit '1' set if it is pointing to the page we want,
3549	 * but if the writer is in the process of moving it
3550	 * then it will be '2', or '0' if it has already moved.
3551 */
3552
3553 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3554
3555 /*
3556 * If we did not convert it, then we must try again.
3557 */
3558 if (!ret)
3559 goto spin;
3560
3561 /*
3562 * Yeah! We succeeded in replacing the page.
3563 *
3564 * Now make the new head point back to the reader page.
3565 */
3566 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3567 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3568
3569 /* Finally update the reader page to the new head */
3570 cpu_buffer->reader_page = reader;
3571 cpu_buffer->reader_page->read = 0;
3572
3573 if (overwrite != cpu_buffer->last_overrun) {
3574 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3575 cpu_buffer->last_overrun = overwrite;
3576 }
3577
3578 goto again;
3579
3580 out:
3581 /* Update the read_stamp on the first event */
3582 if (reader && reader->read == 0)
3583 cpu_buffer->read_stamp = reader->page->time_stamp;
3584
3585 arch_spin_unlock(&cpu_buffer->lock);
3586 local_irq_restore(flags);
3587
3588 return reader;
3589 }
3590
3591 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3592 {
3593 struct ring_buffer_event *event;
3594 struct buffer_page *reader;
3595 unsigned length;
3596
3597 reader = rb_get_reader_page(cpu_buffer);
3598
3599 /* This function should not be called when buffer is empty */
3600 if (RB_WARN_ON(cpu_buffer, !reader))
3601 return;
3602
3603 event = rb_reader_event(cpu_buffer);
3604
3605 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3606 cpu_buffer->read++;
3607
3608 rb_update_read_stamp(cpu_buffer, event);
3609
3610 length = rb_event_length(event);
3611 cpu_buffer->reader_page->read += length;
3612 }
3613
3614 static void rb_advance_iter(struct ring_buffer_iter *iter)
3615 {
3616 struct ring_buffer_per_cpu *cpu_buffer;
3617 struct ring_buffer_event *event;
3618 unsigned length;
3619
3620 cpu_buffer = iter->cpu_buffer;
3621
3622 /*
3623 * Check if we are at the end of the buffer.
3624 */
3625 if (iter->head >= rb_page_size(iter->head_page)) {
3626 /* discarded commits can make the page empty */
3627 if (iter->head_page == cpu_buffer->commit_page)
3628 return;
3629 rb_inc_iter(iter);
3630 return;
3631 }
3632
3633 event = rb_iter_head_event(iter);
3634
3635 length = rb_event_length(event);
3636
3637 /*
3638 * This should not be called to advance the header if we are
3639 * at the tail of the buffer.
3640 */
3641 if (RB_WARN_ON(cpu_buffer,
3642 (iter->head_page == cpu_buffer->commit_page) &&
3643 (iter->head + length > rb_commit_index(cpu_buffer))))
3644 return;
3645
3646 rb_update_iter_read_stamp(iter, event);
3647
3648 iter->head += length;
3649
3650 /* check for end of page padding */
3651 if ((iter->head >= rb_page_size(iter->head_page)) &&
3652 (iter->head_page != cpu_buffer->commit_page))
3653 rb_inc_iter(iter);
3654 }
3655
3656 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3657 {
3658 return cpu_buffer->lost_events;
3659 }
3660
3661 static struct ring_buffer_event *
3662 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3663 unsigned long *lost_events)
3664 {
3665 struct ring_buffer_event *event;
3666 struct buffer_page *reader;
3667 int nr_loops = 0;
3668
3669 again:
3670 /*
3671 * We repeat when a time extend is encountered.
3672 * Since the time extend is always attached to a data event,
3673 * we should never loop more than once.
3674 * (We never hit the following condition more than twice).
3675 */
3676 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3677 return NULL;
3678
3679 reader = rb_get_reader_page(cpu_buffer);
3680 if (!reader)
3681 return NULL;
3682
3683 event = rb_reader_event(cpu_buffer);
3684
3685 switch (event->type_len) {
3686 case RINGBUF_TYPE_PADDING:
3687 if (rb_null_event(event))
3688 RB_WARN_ON(cpu_buffer, 1);
3689 /*
3690 * Because the writer could be discarding every
3691		 * event it creates (which would probably be bad),
3692 * if we were to go back to "again" then we may never
3693 * catch up, and will trigger the warn on, or lock
3694 * the box. Return the padding, and we will release
3695 * the current locks, and try again.
3696 */
3697 return event;
3698
3699 case RINGBUF_TYPE_TIME_EXTEND:
3700 /* Internal data, OK to advance */
3701 rb_advance_reader(cpu_buffer);
3702 goto again;
3703
3704 case RINGBUF_TYPE_TIME_STAMP:
3705 /* FIXME: not implemented */
3706 rb_advance_reader(cpu_buffer);
3707 goto again;
3708
3709 case RINGBUF_TYPE_DATA:
3710 if (ts) {
3711 *ts = cpu_buffer->read_stamp + event->time_delta;
3712 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3713 cpu_buffer->cpu, ts);
3714 }
3715 if (lost_events)
3716 *lost_events = rb_lost_events(cpu_buffer);
3717 return event;
3718
3719 default:
3720 BUG();
3721 }
3722
3723 return NULL;
3724 }
3725 EXPORT_SYMBOL_GPL(ring_buffer_peek);
3726
3727 static struct ring_buffer_event *
3728 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3729 {
3730 struct ring_buffer *buffer;
3731 struct ring_buffer_per_cpu *cpu_buffer;
3732 struct ring_buffer_event *event;
3733 int nr_loops = 0;
3734
3735 cpu_buffer = iter->cpu_buffer;
3736 buffer = cpu_buffer->buffer;
3737
3738 /*
3739 * Check if someone performed a consuming read to
3740 * the buffer. A consuming read invalidates the iterator
3741 * and we need to reset the iterator in this case.
3742 */
3743 if (unlikely(iter->cache_read != cpu_buffer->read ||
3744 iter->cache_reader_page != cpu_buffer->reader_page))
3745 rb_iter_reset(iter);
3746
3747 again:
3748 if (ring_buffer_iter_empty(iter))
3749 return NULL;
3750
3751 /*
3752 * We repeat when a time extend is encountered or we hit
3753 * the end of the page. Since the time extend is always attached
3754 * to a data event, we should never loop more than three times.
3755 * Once for going to next page, once on time extend, and
3756 * finally once to get the event.
3757 * (We never hit the following condition more than thrice).
3758 */
3759 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
3760 return NULL;
3761
3762 if (rb_per_cpu_empty(cpu_buffer))
3763 return NULL;
3764
3765 if (iter->head >= rb_page_size(iter->head_page)) {
3766 rb_inc_iter(iter);
3767 goto again;
3768 }
3769
3770 event = rb_iter_head_event(iter);
3771
3772 switch (event->type_len) {
3773 case RINGBUF_TYPE_PADDING:
3774 if (rb_null_event(event)) {
3775 rb_inc_iter(iter);
3776 goto again;
3777 }
3778 rb_advance_iter(iter);
3779 return event;
3780
3781 case RINGBUF_TYPE_TIME_EXTEND:
3782 /* Internal data, OK to advance */
3783 rb_advance_iter(iter);
3784 goto again;
3785
3786 case RINGBUF_TYPE_TIME_STAMP:
3787 /* FIXME: not implemented */
3788 rb_advance_iter(iter);
3789 goto again;
3790
3791 case RINGBUF_TYPE_DATA:
3792 if (ts) {
3793 *ts = iter->read_stamp + event->time_delta;
3794 ring_buffer_normalize_time_stamp(buffer,
3795 cpu_buffer->cpu, ts);
3796 }
3797 return event;
3798
3799 default:
3800 BUG();
3801 }
3802
3803 return NULL;
3804 }
3805 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3806
3807 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
3808 {
3809 if (likely(!in_nmi())) {
3810 raw_spin_lock(&cpu_buffer->reader_lock);
3811 return true;
3812 }
3813
3814 /*
3815	 * If an NMI die dump prints out the content of the ring buffer,
3816 * trylock must be used to prevent a deadlock if the NMI
3817 * preempted a task that holds the ring buffer locks. If
3818 * we get the lock then all is fine, if not, then continue
3819 * to do the read, but this can corrupt the ring buffer,
3820 * so it must be permanently disabled from future writes.
3821 * Reading from NMI is a oneshot deal.
3822 */
3823 if (raw_spin_trylock(&cpu_buffer->reader_lock))
3824 return true;
3825
3826 /* Continue without locking, but disable the ring buffer */
3827 atomic_inc(&cpu_buffer->record_disabled);
3828 return false;
3829 }
3830
3831 static inline void
3832 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
3833 {
3834 if (likely(locked))
3835 raw_spin_unlock(&cpu_buffer->reader_lock);
3836 return;
3837 }
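/*
 * Illustrative caller pattern for the two helpers above (a sketch, not a
 * new API): reader-side paths disable interrupts, take the reader lock,
 * and remember whether it was actually acquired, so the unlock side stays
 * balanced even on the NMI trylock-failure path:
 *
 *	unsigned long flags;
 *	bool dolock;
 *
 *	local_irq_save(flags);
 *	dolock = rb_reader_lock(cpu_buffer);
 *	... read from cpu_buffer ...
 *	rb_reader_unlock(cpu_buffer, dolock);
 *	local_irq_restore(flags);
 */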
3838
3839 /**
3840 * ring_buffer_peek - peek at the next event to be read
3841 * @buffer: The ring buffer to read
3842 * @cpu: The CPU to peek at
3843 * @ts: a variable to store the event's timestamp (may be NULL)
3844 * @lost_events: a variable to store if events were lost (may be NULL)
3845 *
3846 * This will return the event that will be read next, but does
3847 * not consume the data.
3848 */
3849 struct ring_buffer_event *
3850 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3851 unsigned long *lost_events)
3852 {
3853 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3854 struct ring_buffer_event *event;
3855 unsigned long flags;
3856 bool dolock;
3857
3858 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3859 return NULL;
3860
3861 again:
3862 local_irq_save(flags);
3863 dolock = rb_reader_lock(cpu_buffer);
3864 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3865 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3866 rb_advance_reader(cpu_buffer);
3867 rb_reader_unlock(cpu_buffer, dolock);
3868 local_irq_restore(flags);
3869
3870 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3871 goto again;
3872
3873 return event;
3874 }
3875
3876 /**
3877 * ring_buffer_iter_peek - peek at the next event to be read
3878 * @iter: The ring buffer iterator
3879 * @ts: a variable to store the event's timestamp (may be NULL)
3880 *
3881 * This will return the event that will be read next, but does
3882 * not increment the iterator.
3883 */
3884 struct ring_buffer_event *
3885 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3886 {
3887 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3888 struct ring_buffer_event *event;
3889 unsigned long flags;
3890
3891 again:
3892 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3893 event = rb_iter_peek(iter, ts);
3894 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3895
3896 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3897 goto again;
3898
3899 return event;
3900 }
3901
3902 /**
3903 * ring_buffer_consume - return an event and consume it
3904 * @buffer: The ring buffer to get the next event from
3905 * @cpu: the cpu to read the buffer from
3906 * @ts: a variable to store the timestamp (may be NULL)
3907 * @lost_events: a variable to store if events were lost (may be NULL)
3908 *
3909 * Returns the next event in the ring buffer, and that event is consumed.
3910 * That is, sequential reads will keep returning different events, and
3911 * will eventually empty the ring buffer if the producer is slower.
3912 */
3913 struct ring_buffer_event *
3914 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3915 unsigned long *lost_events)
3916 {
3917 struct ring_buffer_per_cpu *cpu_buffer;
3918 struct ring_buffer_event *event = NULL;
3919 unsigned long flags;
3920 bool dolock;
3921
3922 again:
3923 /* might be called in atomic */
3924 preempt_disable();
3925
3926 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3927 goto out;
3928
3929 cpu_buffer = buffer->buffers[cpu];
3930 local_irq_save(flags);
3931 dolock = rb_reader_lock(cpu_buffer);
3932
3933 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3934 if (event) {
3935 cpu_buffer->lost_events = 0;
3936 rb_advance_reader(cpu_buffer);
3937 }
3938
3939 rb_reader_unlock(cpu_buffer, dolock);
3940 local_irq_restore(flags);
3941
3942 out:
3943 preempt_enable();
3944
3945 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3946 goto again;
3947
3948 return event;
3949 }
3950 EXPORT_SYMBOL_GPL(ring_buffer_consume);
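/*
 * Example consumer loop (an illustrative sketch; handle_item() is a
 * hypothetical callback, not kernel API): drain one CPU's buffer until it
 * is empty, tracking lost events along the way:
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		handle_item(ring_buffer_event_data(event), ts, lost);
 *
 * The startup self test at the bottom of this file uses essentially this
 * pattern to verify what it wrote.
 */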
3951
3952 /**
3953 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
3954 * @buffer: The ring buffer to read from
3955 * @cpu: The cpu buffer to iterate over
3956 *
3957 * This performs the initial preparations necessary to iterate
3958 * through the buffer. Memory is allocated, buffer recording
3959 * is disabled, and the iterator pointer is returned to the caller.
3960 *
3961 * Disabling buffer recording prevents the read from being
3962 * corrupted. This is not a consuming read, so a producer is not
3963 * expected.
3964 *
3965 * After a sequence of ring_buffer_read_prepare calls, the user is
3966 * expected to make at least one call to ring_buffer_read_prepare_sync.
3967 * Afterwards, ring_buffer_read_start is invoked to get things going
3968 * for real.
3969 *
3970 * This overall must be paired with ring_buffer_read_finish.
3971 */
3972 struct ring_buffer_iter *
3973 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
3974 {
3975 struct ring_buffer_per_cpu *cpu_buffer;
3976 struct ring_buffer_iter *iter;
3977
3978 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3979 return NULL;
3980
3981 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3982 if (!iter)
3983 return NULL;
3984
3985 cpu_buffer = buffer->buffers[cpu];
3986
3987 iter->cpu_buffer = cpu_buffer;
3988
3989 atomic_inc(&buffer->resize_disabled);
3990 atomic_inc(&cpu_buffer->record_disabled);
3991
3992 return iter;
3993 }
3994 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
3995
3996 /**
3997 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
3998 *
3999 * All previously invoked ring_buffer_read_prepare calls to prepare
4000 * iterators will be synchronized. Afterwards, ring_buffer_read_start
4001 * calls on those iterators are allowed.
4002 */
4003 void
4004 ring_buffer_read_prepare_sync(void)
4005 {
4006 synchronize_sched();
4007 }
4008 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4009
4010 /**
4011 * ring_buffer_read_start - start a non consuming read of the buffer
4012 * @iter: The iterator returned by ring_buffer_read_prepare
4013 *
4014 * This finalizes the startup of an iteration through the buffer.
4015 * The iterator comes from a call to ring_buffer_read_prepare and
4016 * an intervening ring_buffer_read_prepare_sync must have been
4017 * performed.
4018 *
4019 * Must be paired with ring_buffer_read_finish.
4020 */
4021 void
4022 ring_buffer_read_start(struct ring_buffer_iter *iter)
4023 {
4024 struct ring_buffer_per_cpu *cpu_buffer;
4025 unsigned long flags;
4026
4027 if (!iter)
4028 return;
4029
4030 cpu_buffer = iter->cpu_buffer;
4031
4032 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4033 arch_spin_lock(&cpu_buffer->lock);
4034 rb_iter_reset(iter);
4035 arch_spin_unlock(&cpu_buffer->lock);
4036 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4037 }
4038 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4039
4040 /**
4041 * ring_buffer_read_finish - finish reading the iterator of the buffer
4042 * @iter: The iterator retrieved by ring_buffer_read_prepare
4043 *
4044 * This re-enables the recording to the buffer, and frees the
4045 * iterator.
4046 */
4047 void
4048 ring_buffer_read_finish(struct ring_buffer_iter *iter)
4049 {
4050 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4051 unsigned long flags;
4052
4053 /*
4054 * Ring buffer is disabled from recording, here's a good place
4055 * to check the integrity of the ring buffer.
4056 * Must prevent readers from trying to read, as the check
4057 * clears the HEAD page and readers require it.
4058 */
4059 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4060 rb_check_pages(cpu_buffer);
4061 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4062
4063 atomic_dec(&cpu_buffer->record_disabled);
4064 atomic_dec(&cpu_buffer->buffer->resize_disabled);
4065 kfree(iter);
4066 }
4067 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
4068
4069 /**
4070 * ring_buffer_read - read the next item in the ring buffer by the iterator
4071 * @iter: The ring buffer iterator
4072 * @ts: The time stamp of the event read.
4073 *
4074 * This reads the next event in the ring buffer and increments the iterator.
4075 */
4076 struct ring_buffer_event *
4077 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
4078 {
4079 struct ring_buffer_event *event;
4080 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4081 unsigned long flags;
4082
4083 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4084 again:
4085 event = rb_iter_peek(iter, ts);
4086 if (!event)
4087 goto out;
4088
4089 if (event->type_len == RINGBUF_TYPE_PADDING)
4090 goto again;
4091
4092 rb_advance_iter(iter);
4093 out:
4094 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4095
4096 return event;
4097 }
4098 EXPORT_SYMBOL_GPL(ring_buffer_read);
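/*
 * Putting the iterator API together, a non-consuming read of one CPU
 * buffer follows this shape (an illustrative sketch; handle_event() is a
 * hypothetical callback and error handling is abbreviated):
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu);
 *	if (!iter)
 *		return;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		handle_event(event, ts);
 *	ring_buffer_read_finish(iter);
 */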
4099
4100 /**
4101 * ring_buffer_size - return the size of a per-CPU ring buffer (in bytes)
4102 * @buffer: The ring buffer.  @cpu: The CPU buffer to get the size of.
4103 */
4104 unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
4105 {
4106 /*
4107 * Earlier, this method returned
4108 * BUF_PAGE_SIZE * buffer->nr_pages
4109 * Since the nr_pages field is now removed, we have converted this to
4110 * return the per cpu buffer value.
4111 */
4112 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4113 return 0;
4114
4115 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
4116 }
4117 EXPORT_SYMBOL_GPL(ring_buffer_size);
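/*
 * A caller that wants the combined footprint can sum the per-CPU sizes
 * (a sketch; which CPUs to iterate is the caller's choice):
 *
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		total += ring_buffer_size(buffer, cpu);
 */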
4118
4119 static void
4120 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4121 {
4122 rb_head_page_deactivate(cpu_buffer);
4123
4124 cpu_buffer->head_page
4125 = list_entry(cpu_buffer->pages, struct buffer_page, list);
4126 local_set(&cpu_buffer->head_page->write, 0);
4127 local_set(&cpu_buffer->head_page->entries, 0);
4128 local_set(&cpu_buffer->head_page->page->commit, 0);
4129
4130 cpu_buffer->head_page->read = 0;
4131
4132 cpu_buffer->tail_page = cpu_buffer->head_page;
4133 cpu_buffer->commit_page = cpu_buffer->head_page;
4134
4135 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4136 INIT_LIST_HEAD(&cpu_buffer->new_pages);
4137 local_set(&cpu_buffer->reader_page->write, 0);
4138 local_set(&cpu_buffer->reader_page->entries, 0);
4139 local_set(&cpu_buffer->reader_page->page->commit, 0);
4140 cpu_buffer->reader_page->read = 0;
4141
4142 local_set(&cpu_buffer->entries_bytes, 0);
4143 local_set(&cpu_buffer->overrun, 0);
4144 local_set(&cpu_buffer->commit_overrun, 0);
4145 local_set(&cpu_buffer->dropped_events, 0);
4146 local_set(&cpu_buffer->entries, 0);
4147 local_set(&cpu_buffer->committing, 0);
4148 local_set(&cpu_buffer->commits, 0);
4149 cpu_buffer->read = 0;
4150 cpu_buffer->read_bytes = 0;
4151
4152 cpu_buffer->write_stamp = 0;
4153 cpu_buffer->read_stamp = 0;
4154
4155 cpu_buffer->lost_events = 0;
4156 cpu_buffer->last_overrun = 0;
4157
4158 rb_head_page_activate(cpu_buffer);
4159 }
4160
4161 /**
4162 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
4163 * @buffer: The ring buffer to reset a per cpu buffer of
4164 * @cpu: The CPU buffer to be reset
4165 */
4166 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
4167 {
4168 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4169 unsigned long flags;
4170
4171 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4172 return;
4173
4174 atomic_inc(&buffer->resize_disabled);
4175 atomic_inc(&cpu_buffer->record_disabled);
4176
4177 /* Make sure all commits have finished */
4178 synchronize_sched();
4179
4180 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4181
4182 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4183 goto out;
4184
4185 arch_spin_lock(&cpu_buffer->lock);
4186
4187 rb_reset_cpu(cpu_buffer);
4188
4189 arch_spin_unlock(&cpu_buffer->lock);
4190
4191 out:
4192 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4193
4194 atomic_dec(&cpu_buffer->record_disabled);
4195 atomic_dec(&buffer->resize_disabled);
4196 }
4197 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4198
4199 /**
4200 * ring_buffer_reset - reset a ring buffer
4201 * @buffer: The ring buffer to reset all cpu buffers
4202 */
4203 void ring_buffer_reset(struct ring_buffer *buffer)
4204 {
4205 int cpu;
4206
4207 for_each_buffer_cpu(buffer, cpu)
4208 ring_buffer_reset_cpu(buffer, cpu);
4209 }
4210 EXPORT_SYMBOL_GPL(ring_buffer_reset);
4211
4212 /**
4213 * ring_buffer_empty - is the ring buffer empty?
4214 * @buffer: The ring buffer to test
4215 */
4216 bool ring_buffer_empty(struct ring_buffer *buffer)
4217 {
4218 struct ring_buffer_per_cpu *cpu_buffer;
4219 unsigned long flags;
4220 bool dolock;
4221 int cpu;
4222 int ret;
4223
4224 /* yes this is racy, but if you don't like the race, lock the buffer */
4225 for_each_buffer_cpu(buffer, cpu) {
4226 cpu_buffer = buffer->buffers[cpu];
4227 local_irq_save(flags);
4228 dolock = rb_reader_lock(cpu_buffer);
4229 ret = rb_per_cpu_empty(cpu_buffer);
4230 rb_reader_unlock(cpu_buffer, dolock);
4231 local_irq_restore(flags);
4232
4233 if (!ret)
4234 return false;
4235 }
4236
4237 return true;
4238 }
4239 EXPORT_SYMBOL_GPL(ring_buffer_empty);
4240
4241 /**
4242 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4243 * @buffer: The ring buffer
4244 * @cpu: The CPU buffer to test
4245 */
4246 bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4247 {
4248 struct ring_buffer_per_cpu *cpu_buffer;
4249 unsigned long flags;
4250 bool dolock;
4251 int ret;
4252
4253 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4254 return true;
4255
4256 cpu_buffer = buffer->buffers[cpu];
4257 local_irq_save(flags);
4258 dolock = rb_reader_lock(cpu_buffer);
4259 ret = rb_per_cpu_empty(cpu_buffer);
4260 rb_reader_unlock(cpu_buffer, dolock);
4261 local_irq_restore(flags);
4262
4263 return ret;
4264 }
4265 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
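/*
 * The emptiness checks above are only snapshots, not synchronization: a
 * poller that finds the buffer empty can sleep and retry (a crude sketch;
 * ring_buffer_wait() is the proper facility for blocking readers):
 *
 *	while (ring_buffer_empty_cpu(buffer, cpu))
 *		schedule_timeout_interruptible(HZ / 10);
 */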
4266
4267 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4268 /**
4269 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
4270 * @buffer_a: One buffer to swap with
4271 * @buffer_b: The other buffer to swap with. @cpu: the CPU to swap.
4272 *
4273 * This function is useful for tracers that want to take a "snapshot"
4274 * of a CPU buffer and have another backup buffer lying around.
4275 * It is expected that the tracer handles the cpu buffer not being
4276 * used at the moment.
4277 */
4278 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4279 struct ring_buffer *buffer_b, int cpu)
4280 {
4281 struct ring_buffer_per_cpu *cpu_buffer_a;
4282 struct ring_buffer_per_cpu *cpu_buffer_b;
4283 int ret = -EINVAL;
4284
4285 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4286 !cpumask_test_cpu(cpu, buffer_b->cpumask))
4287 goto out;
4288
4289 cpu_buffer_a = buffer_a->buffers[cpu];
4290 cpu_buffer_b = buffer_b->buffers[cpu];
4291
4292 /* At least make sure the two buffers are somewhat the same */
4293 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4294 goto out;
4295
4296 ret = -EAGAIN;
4297
4298 if (atomic_read(&buffer_a->record_disabled))
4299 goto out;
4300
4301 if (atomic_read(&buffer_b->record_disabled))
4302 goto out;
4303
4304 if (atomic_read(&cpu_buffer_a->record_disabled))
4305 goto out;
4306
4307 if (atomic_read(&cpu_buffer_b->record_disabled))
4308 goto out;
4309
4310 /*
4311 * We can't do a synchronize_sched here because this
4312 * function can be called in atomic context.
4313 * Normally this will be called from the same CPU as cpu.
4314 * If not it's up to the caller to protect this.
4315 */
4316 atomic_inc(&cpu_buffer_a->record_disabled);
4317 atomic_inc(&cpu_buffer_b->record_disabled);
4318
4319 ret = -EBUSY;
4320 if (local_read(&cpu_buffer_a->committing))
4321 goto out_dec;
4322 if (local_read(&cpu_buffer_b->committing))
4323 goto out_dec;
4324
4325 buffer_a->buffers[cpu] = cpu_buffer_b;
4326 buffer_b->buffers[cpu] = cpu_buffer_a;
4327
4328 cpu_buffer_b->buffer = buffer_a;
4329 cpu_buffer_a->buffer = buffer_b;
4330
4331 ret = 0;
4332
4333 out_dec:
4334 atomic_dec(&cpu_buffer_a->record_disabled);
4335 atomic_dec(&cpu_buffer_b->record_disabled);
4336 out:
4337 return ret;
4338 }
4339 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
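/*
 * Sketch of typical use, modeled on how a latency tracer snapshots a CPU
 * (the buffer names here are hypothetical): swap the live CPU buffer with
 * a spare "max" buffer, then read the frozen copy at leisure:
 *
 *	int err;
 *
 *	err = ring_buffer_swap_cpu(max_buffer, live_buffer, cpu);
 *	if (err == -EBUSY || err == -EAGAIN)
 *		... transient failure; retry or skip this snapshot ...
 */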
4340 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
4341
4342 /**
4343 * ring_buffer_alloc_read_page - allocate a page to read from buffer
4344 * @buffer: the buffer to allocate for.
4345 * @cpu: the cpu buffer to allocate.
4346 *
4347 * This function is used in conjunction with ring_buffer_read_page.
4348 * When reading a full page from the ring buffer, these functions
4349 * can be used to speed up the process. The calling function should
4350 * allocate a few pages first with this function. Then when it
4351 * needs to get pages from the ring buffer, it passes the result
4352 * of this function into ring_buffer_read_page, which will swap
4353 * the page that was allocated with the read page of the buffer.
4354 *
4355 * Returns:
4356 * The page allocated, or ERR_PTR
4357 */
4358 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4359 {
4360 struct ring_buffer_per_cpu *cpu_buffer;
4361 struct buffer_data_page *bpage = NULL;
4362 unsigned long flags;
4363 struct page *page;
4364
4365 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4366 return ERR_PTR(-ENODEV);
4367
4368 cpu_buffer = buffer->buffers[cpu];
4369 local_irq_save(flags);
4370 arch_spin_lock(&cpu_buffer->lock);
4371
4372 if (cpu_buffer->free_page) {
4373 bpage = cpu_buffer->free_page;
4374 cpu_buffer->free_page = NULL;
4375 }
4376
4377 arch_spin_unlock(&cpu_buffer->lock);
4378 local_irq_restore(flags);
4379
4380 if (bpage)
4381 goto out;
4382
4383 page = alloc_pages_node(cpu_to_node(cpu),
4384 GFP_KERNEL | __GFP_NORETRY, 0);
4385 if (!page)
4386 return ERR_PTR(-ENOMEM);
4387
4388 bpage = page_address(page);
4389
4390 out:
4391 rb_init_page(bpage);
4392
4393 return bpage;
4394 }
4395 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
4396
4397 /**
4398 * ring_buffer_free_read_page - free an allocated read page
4399 * @buffer: the buffer the page was allocated for
4400 * @cpu: the cpu buffer the page came from
4401 * @data: the page to free
4402 *
4403 * Free a page allocated from ring_buffer_alloc_read_page.
4404 */
4405 void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
4406 {
4407 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4408 struct buffer_data_page *bpage = data;
4409 unsigned long flags;
4410
4411 local_irq_save(flags);
4412 arch_spin_lock(&cpu_buffer->lock);
4413
4414 if (!cpu_buffer->free_page) {
4415 cpu_buffer->free_page = bpage;
4416 bpage = NULL;
4417 }
4418
4419 arch_spin_unlock(&cpu_buffer->lock);
4420 local_irq_restore(flags);
4421
4422 free_page((unsigned long)bpage);
4423 }
4424 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4425
4426 /**
4427 * ring_buffer_read_page - extract a page from the ring buffer
4428 * @buffer: buffer to extract from
4429 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4430 * @len: amount to extract
4431 * @cpu: the cpu of the buffer to extract
4432 * @full: should the extraction only happen when the page is full.
4433 *
4434 * This function will pull out a page from the ring buffer and consume it.
4435 * @data_page must be the address of the variable that was returned
4436 * from ring_buffer_alloc_read_page. This is because the page might be used
4437 * to swap with a page in the ring buffer.
4438 *
4439 * for example:
4440 * rpage = ring_buffer_alloc_read_page(buffer, cpu);
4441 * if (IS_ERR(rpage))
4442 * return PTR_ERR(rpage);
4443 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4444 * if (ret >= 0)
4445 * process_page(rpage, ret);
4446 *
4447 * When @full is set, the function will not succeed unless
4448 * the writer is off the reader page.
4449 *
4450 * Note: it is up to the calling functions to handle sleeps and wakeups.
4451 * The ring buffer can be used anywhere in the kernel and can not
4452 * blindly call wake_up. The layer that uses the ring buffer must be
4453 * responsible for that.
4454 *
4455 * Returns:
4456 * >=0 if data has been transferred, returns the offset of consumed data.
4457 * <0 if no data has been transferred.
4458 */
4459 int ring_buffer_read_page(struct ring_buffer *buffer,
4460 void **data_page, size_t len, int cpu, int full)
4461 {
4462 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4463 struct ring_buffer_event *event;
4464 struct buffer_data_page *bpage;
4465 struct buffer_page *reader;
4466 unsigned long missed_events;
4467 unsigned long flags;
4468 unsigned int commit;
4469 unsigned int read;
4470 u64 save_timestamp;
4471 int ret = -1;
4472
4473 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4474 goto out;
4475
4476 /*
4477 * If len is not big enough to hold the page header, then
4478 * we can not copy anything.
4479 */
4480 if (len <= BUF_PAGE_HDR_SIZE)
4481 goto out;
4482
4483 len -= BUF_PAGE_HDR_SIZE;
4484
4485 if (!data_page)
4486 goto out;
4487
4488 bpage = *data_page;
4489 if (!bpage)
4490 goto out;
4491
4492 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4493
4494 reader = rb_get_reader_page(cpu_buffer);
4495 if (!reader)
4496 goto out_unlock;
4497
4498 event = rb_reader_event(cpu_buffer);
4499
4500 read = reader->read;
4501 commit = rb_page_commit(reader);
4502
4503 /* Check if any events were dropped */
4504 missed_events = cpu_buffer->lost_events;
4505
4506 /*
4507 * If this page has been partially read or
4508 * if len is not big enough to read the rest of the page or
4509 * a writer is still on the page, then
4510 * we must copy the data from the page to the buffer.
4511 * Otherwise, we can simply swap the page with the one passed in.
4512 */
4513 if (read || (len < (commit - read)) ||
4514 cpu_buffer->reader_page == cpu_buffer->commit_page) {
4515 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4516 unsigned int rpos = read;
4517 unsigned int pos = 0;
4518 unsigned int size;
4519
4520 if (full)
4521 goto out_unlock;
4522
4523 if (len > (commit - read))
4524 len = (commit - read);
4525
4526 /* Always keep the time extend and data together */
4527 size = rb_event_ts_length(event);
4528
4529 if (len < size)
4530 goto out_unlock;
4531
4532 /* save the current timestamp, since the user will need it */
4533 save_timestamp = cpu_buffer->read_stamp;
4534
4535 /* Need to copy one event at a time */
4536 do {
4537 /* We need the size of one event, because
4538 * rb_advance_reader only advances by one event,
4539 * whereas rb_event_ts_length may include the size of
4540 * one or two events.
4541 * We have already ensured there's enough space if this
4542 * is a time extend. */
4543 size = rb_event_length(event);
4544 memcpy(bpage->data + pos, rpage->data + rpos, size);
4545
4546 len -= size;
4547
4548 rb_advance_reader(cpu_buffer);
4549 rpos = reader->read;
4550 pos += size;
4551
4552 if (rpos >= commit)
4553 break;
4554
4555 event = rb_reader_event(cpu_buffer);
4556 /* Always keep the time extend and data together */
4557 size = rb_event_ts_length(event);
4558 } while (len >= size);
4559
4560 /* update bpage */
4561 local_set(&bpage->commit, pos);
4562 bpage->time_stamp = save_timestamp;
4563
4564 /* we copied everything to the beginning */
4565 read = 0;
4566 } else {
4567 /* update the entry counter */
4568 cpu_buffer->read += rb_page_entries(reader);
4569 cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4570
4571 /* swap the pages */
4572 rb_init_page(bpage);
4573 bpage = reader->page;
4574 reader->page = *data_page;
4575 local_set(&reader->write, 0);
4576 local_set(&reader->entries, 0);
4577 reader->read = 0;
4578 *data_page = bpage;
4579
4580 /*
4581 * Use the real_end for the data size,
4582 * This gives us a chance to store the lost events
4583 * on the page.
4584 */
4585 if (reader->real_end)
4586 local_set(&bpage->commit, reader->real_end);
4587 }
4588 ret = read;
4589
4590 cpu_buffer->lost_events = 0;
4591
4592 commit = local_read(&bpage->commit);
4593 /*
4594 * Set a flag in the commit field if we lost events
4595 */
4596 if (missed_events) {
4597 /* If there is room at the end of the page to save the
4598 * missed events, then record it there.
4599 */
4600 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4601 memcpy(&bpage->data[commit], &missed_events,
4602 sizeof(missed_events));
4603 local_add(RB_MISSED_STORED, &bpage->commit);
4604 commit += sizeof(missed_events);
4605 }
4606 local_add(RB_MISSED_EVENTS, &bpage->commit);
4607 }
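/*
 * Reader-side sketch (not code in this file) for decoding the flags
 * set above, assuming RB_MISSED_EVENTS and RB_MISSED_STORED are the
 * dedicated high bits of the commit word as defined earlier in this
 * file; the low bits hold the true data length and the stored count
 * sits right after the data:
 *
 *	commit = local_read(&bpage->commit);
 *	end = commit & ~(RB_MISSED_EVENTS | RB_MISSED_STORED);
 *	if (commit & RB_MISSED_STORED)
 *		missed = *(unsigned long *)&bpage->data[end];
 */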
4608
4609 /*
4610 * This page may be handed off to user land. Zero out the unused area.
4611 */
4612 if (commit < BUF_PAGE_SIZE)
4613 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
4614
4615 out_unlock:
4616 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4617
4618 out:
4619 return ret;
4620 }
4621 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
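/*
 * A complete page-read cycle combining the three page APIs above (an
 * illustrative sketch; consume_page() is a hypothetical callback):
 *
 *	void *page = ring_buffer_alloc_read_page(buffer, cpu);
 *	int ret;
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		consume_page(page, ret);
 *	ring_buffer_free_read_page(buffer, cpu, page);
 */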
4622
4623 /*
4624 * We only allocate new buffers, never free them if the CPU goes down.
4625 * If we were to free the buffer, then the user would lose any trace that was in
4626 * the buffer.
4627 */
4628 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
4629 {
4630 struct ring_buffer *buffer;
4631 long nr_pages_same;
4632 int cpu_i;
4633 unsigned long nr_pages;
4634
4635 buffer = container_of(node, struct ring_buffer, node);
4636 if (cpumask_test_cpu(cpu, buffer->cpumask))
4637 return 0;
4638
4639 nr_pages = 0;
4640 nr_pages_same = 1;
4641 /* check if all cpu sizes are same */
4642 for_each_buffer_cpu(buffer, cpu_i) {
4643 /* fill in the size from first enabled cpu */
4644 if (nr_pages == 0)
4645 nr_pages = buffer->buffers[cpu_i]->nr_pages;
4646 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4647 nr_pages_same = 0;
4648 break;
4649 }
4650 }
4651 /* allocate minimum pages, user can later expand it */
4652 if (!nr_pages_same)
4653 nr_pages = 2;
4654 buffer->buffers[cpu] =
4655 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4656 if (!buffer->buffers[cpu]) {
4657 WARN(1, "failed to allocate ring buffer on CPU %u\n",
4658 cpu);
4659 return -ENOMEM;
4660 }
4661 smp_wmb();
4662 cpumask_set_cpu(cpu, buffer->cpumask);
4663 return 0;
4664 }
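/*
 * This is a CPU-hotplug "multi" state callback: the state is set up once
 * from the trace code and each ring buffer then registers itself as an
 * instance, along these lines (a sketch; the exact call sites live in
 * trace.c and ring_buffer_alloc(), and the state name is illustrative):
 *
 *	cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE, "trace/RB:prepare",
 *				trace_rb_cpu_prepare, NULL);
 *	...
 *	cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
 */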
4665
4666 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
4667 /*
4668 * This is a basic integrity check of the ring buffer.
4669 * Late in the boot cycle this test will run when configured in.
4670 * It will kick off a thread per CPU that will go into a loop
4671 * writing to the per cpu ring buffer various sizes of data.
4672 * Some of the data will be large items, some small.
4673 *
4674 * Another thread is created that goes into a spin, sending out
4675 * IPIs to the other CPUs to also write into the ring buffer.
4676 * This tests the nesting ability of the buffer.
4677 *
4678 * Basic stats are recorded and reported. If something in the
4679 * ring buffer should happen that's not expected, a big warning
4680 * is displayed and all ring buffers are disabled.
4681 */
4682 static struct task_struct *rb_threads[NR_CPUS] __initdata;
4683
4684 struct rb_test_data {
4685 struct ring_buffer *buffer;
4686 unsigned long events;
4687 unsigned long bytes_written;
4688 unsigned long bytes_alloc;
4689 unsigned long bytes_dropped;
4690 unsigned long events_nested;
4691 unsigned long bytes_written_nested;
4692 unsigned long bytes_alloc_nested;
4693 unsigned long bytes_dropped_nested;
4694 int min_size_nested;
4695 int max_size_nested;
4696 int max_size;
4697 int min_size;
4698 int cpu;
4699 int cnt;
4700 };
4701
4702 static struct rb_test_data rb_data[NR_CPUS] __initdata;
4703
4704 /* 1 meg per cpu */
4705 #define RB_TEST_BUFFER_SIZE 1048576
4706
4707 static char rb_string[] __initdata =
4708 "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
4709 "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
4710 "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
4711
4712 static bool rb_test_started __initdata;
4713
4714 struct rb_item {
4715 int size;
4716 char str[];
4717 };
4718
4719 static __init int rb_write_something(struct rb_test_data *data, bool nested)
4720 {
4721 struct ring_buffer_event *event;
4722 struct rb_item *item;
4723 bool started;
4724 int event_len;
4725 int size;
4726 int len;
4727 int cnt;
4728
4729 /* Have nested writes differ from what is normally written */
4730 cnt = data->cnt + (nested ? 27 : 0);
4731
4732 /* Multiply cnt by ~e, to make some unique increment */
4733 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
4734
4735 len = size + sizeof(struct rb_item);
4736
4737 started = rb_test_started;
4738 /* read rb_test_started before checking buffer enabled */
4739 smp_rmb();
4740
4741 event = ring_buffer_lock_reserve(data->buffer, len);
4742 if (!event) {
4743 /* Ignore dropped events before test starts. */
4744 if (started) {
4745 if (nested)
4746 data->bytes_dropped_nested += len;
4747 else
4748 data->bytes_dropped += len;
4749 }
4750 return len;
4751 }
4752
4753 event_len = ring_buffer_event_length(event);
4754
4755 if (RB_WARN_ON(data->buffer, event_len < len))
4756 goto out;
4757
4758 item = ring_buffer_event_data(event);
4759 item->size = size;
4760 memcpy(item->str, rb_string, size);
4761
4762 if (nested) {
4763 data->bytes_alloc_nested += event_len;
4764 data->bytes_written_nested += len;
4765 data->events_nested++;
4766 if (!data->min_size_nested || len < data->min_size_nested)
4767 data->min_size_nested = len;
4768 if (len > data->max_size_nested)
4769 data->max_size_nested = len;
4770 } else {
4771 data->bytes_alloc += event_len;
4772 data->bytes_written += len;
4773 data->events++;
4774 if (!data->min_size || len < data->min_size)
4775 data->min_size = len;
4776 if (len > data->max_size)
4777 data->max_size = len;
4778 }
4779
4780 out:
4781 ring_buffer_unlock_commit(data->buffer, event);
4782
4783 return 0;
4784 }
4785
4786 static __init int rb_test(void *arg)
4787 {
4788 struct rb_test_data *data = arg;
4789
4790 while (!kthread_should_stop()) {
4791 rb_write_something(data, false);
4792 data->cnt++;
4793
4794 set_current_state(TASK_INTERRUPTIBLE);
4795 /* Now sleep with a min that varies from 100 to 300us, and a max of 1ms */
4796 usleep_range(((data->cnt % 3) + 1) * 100, 1000);
4797 }
4798
4799 return 0;
4800 }
4801
4802 static __init void rb_ipi(void *ignore)
4803 {
4804 struct rb_test_data *data;
4805 int cpu = smp_processor_id();
4806
4807 data = &rb_data[cpu];
4808 rb_write_something(data, true);
4809 }
4810
4811 static __init int rb_hammer_test(void *arg)
4812 {
4813 while (!kthread_should_stop()) {
4814
4815 /* Send an IPI to all cpus to write data! */
4816 smp_call_function(rb_ipi, NULL, 1);
4817 /* No sleep, but on non-preempt kernels let others run */
4818 schedule();
4819 }
4820
4821 return 0;
4822 }
4823
4824 static __init int test_ringbuffer(void)
4825 {
4826 struct task_struct *rb_hammer;
4827 struct ring_buffer *buffer;
4828 int cpu;
4829 int ret = 0;
4830
4831 pr_info("Running ring buffer tests...\n");
4832
4833 buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
4834 if (WARN_ON(!buffer))
4835 return 0;
4836
4837 /* Disable buffer so that threads can't write to it yet */
4838 ring_buffer_record_off(buffer);
4839
4840 for_each_online_cpu(cpu) {
4841 rb_data[cpu].buffer = buffer;
4842 rb_data[cpu].cpu = cpu;
4843 rb_data[cpu].cnt = cpu;
4844 rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
4845 "rbtester/%d", cpu);
4846 if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
4847 pr_cont("FAILED\n");
4848 ret = PTR_ERR(rb_threads[cpu]);
4849 goto out_free;
4850 }
4851
4852 kthread_bind(rb_threads[cpu], cpu);
4853 wake_up_process(rb_threads[cpu]);
4854 }
4855
4856 /* Now create the rb hammer! */
4857 rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
4858 if (WARN_ON(IS_ERR(rb_hammer))) {
4859 pr_cont("FAILED\n");
4860 ret = PTR_ERR(rb_hammer);
4861 goto out_free;
4862 }
4863
4864 ring_buffer_record_on(buffer);
4865 /*
4866 * Show buffer is enabled before setting rb_test_started.
4867 * Yes there's a small race window where events could be
4868 * dropped and the thread won't catch it. But when a ring
4869 * buffer gets enabled, there will always be some kind of
4870 * delay before other CPUs see it. Thus, we don't care about
4871 * those dropped events. We care about events dropped after
4872 * the threads see that the buffer is active.
4873 */
4874 smp_wmb();
4875 rb_test_started = true;
4876
4877 set_current_state(TASK_INTERRUPTIBLE);
4878 /* Just run for 10 seconds */
4879 schedule_timeout(10 * HZ);
4880
4881 kthread_stop(rb_hammer);
4882
4883 out_free:
4884 for_each_online_cpu(cpu) {
4885 if (!rb_threads[cpu])
4886 break;
4887 kthread_stop(rb_threads[cpu]);
4888 }
4889 if (ret) {
4890 ring_buffer_free(buffer);
4891 return ret;
4892 }
4893
4894 /* Report! */
4895 pr_info("finished\n");
4896 for_each_online_cpu(cpu) {
4897 struct ring_buffer_event *event;
4898 struct rb_test_data *data = &rb_data[cpu];
4899 struct rb_item *item;
4900 unsigned long total_events;
4901 unsigned long total_dropped;
4902 unsigned long total_written;
4903 unsigned long total_alloc;
4904 unsigned long total_read = 0;
4905 unsigned long total_size = 0;
4906 unsigned long total_len = 0;
4907 unsigned long total_lost = 0;
4908 unsigned long lost;
4909 int big_event_size;
4910 int small_event_size;
4911
4912 ret = -1;
4913
4914 total_events = data->events + data->events_nested;
4915 total_written = data->bytes_written + data->bytes_written_nested;
4916 total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
4917 total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
4918
4919 big_event_size = data->max_size + data->max_size_nested;
4920 small_event_size = data->min_size + data->min_size_nested;
4921
4922 pr_info("CPU %d:\n", cpu);
4923 pr_info(" events: %ld\n", total_events);
4924 pr_info(" dropped bytes: %ld\n", total_dropped);
4925 pr_info(" alloced bytes: %ld\n", total_alloc);
4926 pr_info(" written bytes: %ld\n", total_written);
4927 pr_info(" biggest event: %d\n", big_event_size);
4928 pr_info(" smallest event: %d\n", small_event_size);
4929
4930 if (RB_WARN_ON(buffer, total_dropped))
4931 break;
4932
4933 ret = 0;
4934
4935 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
4936 total_lost += lost;
4937 item = ring_buffer_event_data(event);
4938 total_len += ring_buffer_event_length(event);
4939 total_size += item->size + sizeof(struct rb_item);
4940 if (memcmp(&item->str[0], rb_string, item->size) != 0) {
4941 pr_info("FAILED!\n");
4942 pr_info("buffer had: %.*s\n", item->size, item->str);
4943 pr_info("expected: %.*s\n", item->size, rb_string);
4944 RB_WARN_ON(buffer, 1);
4945 ret = -1;
4946 break;
4947 }
4948 total_read++;
4949 }
4950 if (ret)
4951 break;
4952
4953 ret = -1;
4954
4955 pr_info(" read events: %ld\n", total_read);
4956 pr_info(" lost events: %ld\n", total_lost);
4957 pr_info(" total events: %ld\n", total_lost + total_read);
4958 pr_info(" recorded len bytes: %ld\n", total_len);
4959 pr_info(" recorded size bytes: %ld\n", total_size);
4960 if (total_lost)
4961 pr_info(" With dropped events, record len and size may not match\n"
4962 " alloced and written from above\n");
4963 if (!total_lost) {
4964 if (RB_WARN_ON(buffer, total_len != total_alloc ||
4965 total_size != total_written))
4966 break;
4967 }
4968 if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
4969 break;
4970
4971 ret = 0;
4972 }
4973 if (!ret)
4974 pr_info("Ring buffer PASSED!\n");
4975
4976 ring_buffer_free(buffer);
4977 return 0;
4978 }
4979
4980 late_initcall(test_ringbuffer);
4981 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */