1 /*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6 #include <linux/ring_buffer.h>
7 #include <linux/trace_clock.h>
8 #include <linux/ftrace_irq.h>
9 #include <linux/spinlock.h>
10 #include <linux/debugfs.h>
11 #include <linux/uaccess.h>
12 #include <linux/hardirq.h>
13 #include <linux/module.h>
14 #include <linux/percpu.h>
15 #include <linux/mutex.h>
16 #include <linux/init.h>
17 #include <linux/hash.h>
18 #include <linux/list.h>
19 #include <linux/cpu.h>
20 #include <linux/fs.h>
21
22 #include "trace.h"
23
24 /*
25 * The ring buffer header is special. We must manually keep it up to date.
26 */
27 int ring_buffer_print_entry_header(struct trace_seq *s)
28 {
29 int ret;
30
31 ret = trace_seq_printf(s, "# compressed entry header\n");
32 ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
33 ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
34 ret = trace_seq_printf(s, "\tarray : 32 bits\n");
35 ret = trace_seq_printf(s, "\n");
36 ret = trace_seq_printf(s, "\tpadding : type == %d\n",
37 RINGBUF_TYPE_PADDING);
38 ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
39 RINGBUF_TYPE_TIME_EXTEND);
40 ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
41 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
42
43 return ret;
44 }
45
46 /*
47 * The ring buffer is made up of a list of pages. A separate list of pages is
48 * allocated for each CPU. A writer may only write to a buffer that is
49 * associated with the CPU it is currently executing on. A reader may read
50 * from any per cpu buffer.
51 *
52 * The reader is special. For each per cpu buffer, the reader has its own
53 * reader page. When a reader has read the entire reader page, this reader
54 * page is swapped with another page in the ring buffer.
55 *
56 * Now, as long as the writer is off the reader page, the reader can do
57 * whatever it wants with that page. The writer will never write to that page
58 * again (as long as it is out of the ring buffer).
59 *
60 * Here's some silly ASCII art.
61 *
62 * +------+
63 * |reader| RING BUFFER
64 * |page |
65 * +------+ +---+ +---+ +---+
66 * | |-->| |-->| |
67 * +---+ +---+ +---+
68 * ^ |
69 * | |
70 * +---------------+
71 *
72 *
73 * +------+
74 * |reader| RING BUFFER
75 * |page |------------------v
76 * +------+ +---+ +---+ +---+
77 * | |-->| |-->| |
78 * +---+ +---+ +---+
79 * ^ |
80 * | |
81 * +---------------+
82 *
83 *
84 * +------+
85 * |reader| RING BUFFER
86 * |page |------------------v
87 * +------+ +---+ +---+ +---+
88 * ^ | |-->| |-->| |
89 * | +---+ +---+ +---+
90 * | |
91 * | |
92 * +------------------------------+
93 *
94 *
95 * +------+
96 * |buffer| RING BUFFER
97 * |page |------------------v
98 * +------+ +---+ +---+ +---+
99 * ^ | | | |-->| |
100 * | New +---+ +---+ +---+
101 * | Reader------^ |
102 * | page |
103 * +------------------------------+
104 *
105 *
106 * After we make this swap, the reader can hand this page off to the splice
107 * code and be done with it. It can even allocate a new page if it needs to
108 * and swap that into the ring buffer.
109 *
110 * We will be using cmpxchg soon to make all this lockless.
111 *
112 */
113
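/*
 * Illustrative sketch (an assumption for this comment, not code used
 * below): a consuming reader typically drains one CPU's buffer with the
 * ring_buffer_consume() API declared in linux/ring_buffer.h, and the
 * reader-page swap pictured above happens transparently underneath it.
 * process() stands in for whatever the caller does with the payload.
 *
 *	u64 ts;
 *	struct ring_buffer_event *event;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL)
 *		process(ring_buffer_event_data(event),
 *			ring_buffer_event_length(event));
 */
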
114 /*
115 * A fast way to enable or disable all ring buffers is to
116 * call tracing_on or tracing_off. Turning off the ring buffers
117 * prevents all ring buffers from being recorded to.
118 * Turning this switch on makes it OK to write to the
119 * ring buffer, if the ring buffer is enabled itself.
120 *
121 * There are three layers that must be on in order to write
122 * to the ring buffer.
123 *
124 * 1) This global flag must be set.
125 * 2) The ring buffer must be enabled for recording.
126 * 3) The per cpu buffer must be enabled for recording.
127 *
128 * In case of an anomaly, this global flag has a bit set that
129 * will permanently disable all ring buffers.
130 */
131
132 /*
133 * Global flag to disable all recording to ring buffers
134 * This has two bits: ON, DISABLED
135 *
136 * ON DISABLED
137 * ---- ----------
138 * 0 0 : ring buffers are off
139 * 1 0 : ring buffers are on
140 * X 1 : ring buffers are permanently disabled
141 */
142
143 enum {
144 RB_BUFFERS_ON_BIT = 0,
145 RB_BUFFERS_DISABLED_BIT = 1,
146 };
147
148 enum {
149 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
150 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
151 };
152
153 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
154
155 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
156
157 /**
158 * tracing_on - enable all tracing buffers
159 *
160 * This function enables all tracing buffers that may have been
161 * disabled with tracing_off.
162 */
163 void tracing_on(void)
164 {
165 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
166 }
167 EXPORT_SYMBOL_GPL(tracing_on);
168
169 /**
170 * tracing_off - turn off all tracing buffers
171 *
172 * This function stops all tracing buffers from recording data.
173 * It does not disable any overhead the tracers themselves may
174 * be causing. This function simply causes all recording to
175 * the ring buffers to fail.
176 */
177 void tracing_off(void)
178 {
179 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
180 }
181 EXPORT_SYMBOL_GPL(tracing_off);
182
183 /**
184 * tracing_off_permanent - permanently disable ring buffers
185 *
186 * This function, once called, will disable all ring buffers
187 * permanently.
188 */
189 void tracing_off_permanent(void)
190 {
191 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
192 }
193
194 /**
195 * tracing_is_on - show state of ring buffers enabled
196 */
197 int tracing_is_on(void)
198 {
199 return ring_buffer_flags == RB_BUFFERS_ON;
200 }
201 EXPORT_SYMBOL_GPL(tracing_is_on);
202
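/*
 * Illustrative sketch, not used by the ring buffer itself: the knobs that
 * correspond to the three layers of enabling described earlier in this
 * file.  Layers 2 and 3 are checked by the write path on every write; a
 * caller can only flip them.
 */
static void __maybe_unused rb_example_silence(struct ring_buffer *buffer, int cpu)
{
	tracing_off();				/* layer 1: global switch */
	ring_buffer_record_disable(buffer);	/* layer 2: this buffer */
	ring_buffer_record_disable_cpu(buffer, cpu); /* layer 3: one CPU */

	/* ... inspect or dump the quiet buffer here ... */

	ring_buffer_record_enable_cpu(buffer, cpu);
	ring_buffer_record_enable(buffer);
	tracing_on();
}
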
205 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
206 #define RB_ALIGNMENT 4U
207 #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
208
209 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
210 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
211
212 enum {
213 RB_LEN_TIME_EXTEND = 8,
214 RB_LEN_TIME_STAMP = 16,
215 };
216
217 static inline int rb_null_event(struct ring_buffer_event *event)
218 {
219 return event->type_len == RINGBUF_TYPE_PADDING
220 && event->time_delta == 0;
221 }
222
223 static inline int rb_discarded_event(struct ring_buffer_event *event)
224 {
225 return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
226 }
227
228 static void rb_event_set_padding(struct ring_buffer_event *event)
229 {
230 event->type_len = RINGBUF_TYPE_PADDING;
231 event->time_delta = 0;
232 }
233
234 static unsigned
235 rb_event_data_length(struct ring_buffer_event *event)
236 {
237 unsigned length;
238
239 if (event->type_len)
240 length = event->type_len * RB_ALIGNMENT;
241 else
242 length = event->array[0];
243 return length + RB_EVNT_HDR_SIZE;
244 }
245
246 /* inline for ring buffer fast paths */
247 static unsigned
248 rb_event_length(struct ring_buffer_event *event)
249 {
250 switch (event->type_len) {
251 case RINGBUF_TYPE_PADDING:
252 if (rb_null_event(event))
253 /* undefined */
254 return -1;
255 return event->array[0] + RB_EVNT_HDR_SIZE;
256
257 case RINGBUF_TYPE_TIME_EXTEND:
258 return RB_LEN_TIME_EXTEND;
259
260 case RINGBUF_TYPE_TIME_STAMP:
261 return RB_LEN_TIME_STAMP;
262
263 case RINGBUF_TYPE_DATA:
264 return rb_event_data_length(event);
265 default:
266 BUG();
267 }
268 /* not hit */
269 return 0;
270 }
271
272 /**
273 * ring_buffer_event_length - return the length of the event
274 * @event: the event to get the length of
275 */
276 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
277 {
278 unsigned length = rb_event_length(event);
279 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
280 return length;
281 length -= RB_EVNT_HDR_SIZE;
282 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
283 length -= sizeof(event->array[0]);
284 return length;
285 }
286 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
287
288 /* inline for ring buffer fast paths */
289 static void *
290 rb_event_data(struct ring_buffer_event *event)
291 {
292 BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
293 /* If length is in len field, then array[0] has the data */
294 if (event->type_len)
295 return (void *)&event->array[0];
296 /* Otherwise length is in array[0] and array[1] has the data */
297 return (void *)&event->array[1];
298 }
299
300 /**
301 * ring_buffer_event_data - return the data of the event
302 * @event: the event to get the data from
303 */
304 void *ring_buffer_event_data(struct ring_buffer_event *event)
305 {
306 return rb_event_data(event);
307 }
308 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
309
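/*
 * Illustrative sketch, not used below: given an event handed out by the
 * read side, locate its payload and usable length.
 */
static void __maybe_unused rb_example_inspect(struct ring_buffer_event *event)
{
	void *body = ring_buffer_event_data(event);
	unsigned length = ring_buffer_event_length(event);

	/* length is the data length only, not the on-buffer event size */
	printk(KERN_DEBUG "event: %u bytes of data at %p\n", length, body);
}
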
310 #define for_each_buffer_cpu(buffer, cpu) \
311 for_each_cpu(cpu, buffer->cpumask)
312
313 #define TS_SHIFT 27
314 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
315 #define TS_DELTA_TEST (~TS_MASK)
316
317 struct buffer_data_page {
318 u64 time_stamp; /* page time stamp */
319 local_t commit; /* write committed index */
320 unsigned char data[]; /* data of buffer page */
321 };
322
323 struct buffer_page {
324 local_t write; /* index for next write */
325 unsigned read; /* index for next read */
326 struct list_head list; /* list of free pages */
327 struct buffer_data_page *page; /* Actual data page */
328 };
329
330 static void rb_init_page(struct buffer_data_page *bpage)
331 {
332 local_set(&bpage->commit, 0);
333 }
334
335 /**
336 * ring_buffer_page_len - the size of data on the page.
337 * @page: The page to read
338 *
339 * Returns the amount of data on the page, including buffer page header.
340 */
341 size_t ring_buffer_page_len(void *page)
342 {
343 return local_read(&((struct buffer_data_page *)page)->commit)
344 + BUF_PAGE_HDR_SIZE;
345 }
346
347 /*
348 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
349 * this issue out.
350 */
351 static void free_buffer_page(struct buffer_page *bpage)
352 {
353 free_page((unsigned long)bpage->page);
354 kfree(bpage);
355 }
356
357 /*
358 * We need to fit the time_stamp delta into 27 bits.
359 */
360 static inline int test_time_stamp(u64 delta)
361 {
362 if (delta & TS_DELTA_TEST)
363 return 1;
364 return 0;
365 }
366
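/*
 * Illustrative sketch, not used below: when a delta fails the test above,
 * the write path inserts a TIME_EXTEND event (see rb_add_time_stamp())
 * that carries the delta split across two fields.
 */
static void __maybe_unused rb_example_split_delta(u64 delta,
						  u32 *low, u32 *high)
{
	*low = delta & TS_MASK;		/* 27 bits in event->time_delta */
	*high = delta >> TS_SHIFT;	/* the rest in event->array[0]  */
}
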
367 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
368
369 int ring_buffer_print_page_header(struct trace_seq *s)
370 {
371 struct buffer_data_page field;
372 int ret;
373
374 ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
375 "offset:0;\tsize:%u;\n",
376 (unsigned int)sizeof(field.time_stamp));
377
378 ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
379 "offset:%u;\tsize:%u;\n",
380 (unsigned int)offsetof(typeof(field), commit),
381 (unsigned int)sizeof(field.commit));
382
383 ret = trace_seq_printf(s, "\tfield: char data;\t"
384 "offset:%u;\tsize:%u;\n",
385 (unsigned int)offsetof(typeof(field), data),
386 (unsigned int)BUF_PAGE_SIZE);
387
388 return ret;
389 }
390
391 /*
392 * If head_page == tail_page && head == tail, then the buffer is empty.
393 */
394 struct ring_buffer_per_cpu {
395 int cpu;
396 struct ring_buffer *buffer;
397 spinlock_t reader_lock; /* serialize readers */
398 raw_spinlock_t lock;
399 struct lock_class_key lock_key;
400 struct list_head pages;
401 struct buffer_page *head_page; /* read from head */
402 struct buffer_page *tail_page; /* write to tail */
403 struct buffer_page *commit_page; /* committed pages */
404 struct buffer_page *reader_page;
405 unsigned long overrun;
406 unsigned long entries;
407 u64 write_stamp;
408 u64 read_stamp;
409 atomic_t record_disabled;
410 };
411
412 struct ring_buffer {
413 unsigned pages;
414 unsigned flags;
415 int cpus;
416 atomic_t record_disabled;
417 cpumask_var_t cpumask;
418
419 struct mutex mutex;
420
421 struct ring_buffer_per_cpu **buffers;
422
423 #ifdef CONFIG_HOTPLUG_CPU
424 struct notifier_block cpu_notify;
425 #endif
426 u64 (*clock)(void);
427 };
428
429 struct ring_buffer_iter {
430 struct ring_buffer_per_cpu *cpu_buffer;
431 unsigned long head;
432 struct buffer_page *head_page;
433 u64 read_stamp;
434 };
435
436 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
437 #define RB_WARN_ON(buffer, cond) \
438 ({ \
439 int _____ret = unlikely(cond); \
440 if (_____ret) { \
441 atomic_inc(&buffer->record_disabled); \
442 WARN_ON(1); \
443 } \
444 _____ret; \
445 })
446
447 /* Up this if you want to test the TIME_EXTENTS and normalization */
448 #define DEBUG_SHIFT 0
449
450 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
451 {
452 u64 time;
453
454 preempt_disable_notrace();
455 /* shift to debug/test normalization and TIME_EXTENTS */
456 time = buffer->clock() << DEBUG_SHIFT;
457 preempt_enable_no_resched_notrace();
458
459 return time;
460 }
461 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
462
463 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
464 int cpu, u64 *ts)
465 {
466 /* Just stupid testing the normalize function and deltas */
467 *ts >>= DEBUG_SHIFT;
468 }
469 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
470
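/*
 * Illustrative sketch, not used below: take a time stamp the way the write
 * path does and normalize it the way a reader must before presenting it.
 */
static u64 __maybe_unused rb_example_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 ts = ring_buffer_time_stamp(buffer, cpu);

	ring_buffer_normalize_time_stamp(buffer, cpu, &ts);
	return ts;
}
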
471 /**
472 * rb_check_pages - integrity check of buffer pages
473 * @cpu_buffer: CPU buffer with pages to test
474 *
475 * As a safety measure we check to make sure the data pages have not
476 * been corrupted.
477 */
478 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
479 {
480 struct list_head *head = &cpu_buffer->pages;
481 struct buffer_page *bpage, *tmp;
482
483 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
484 return -1;
485 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
486 return -1;
487
488 list_for_each_entry_safe(bpage, tmp, head, list) {
489 if (RB_WARN_ON(cpu_buffer,
490 bpage->list.next->prev != &bpage->list))
491 return -1;
492 if (RB_WARN_ON(cpu_buffer,
493 bpage->list.prev->next != &bpage->list))
494 return -1;
495 }
496
497 return 0;
498 }
499
500 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
501 unsigned nr_pages)
502 {
503 struct list_head *head = &cpu_buffer->pages;
504 struct buffer_page *bpage, *tmp;
505 unsigned long addr;
506 LIST_HEAD(pages);
507 unsigned i;
508
509 for (i = 0; i < nr_pages; i++) {
510 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
511 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
512 if (!bpage)
513 goto free_pages;
514 list_add(&bpage->list, &pages);
515
516 addr = __get_free_page(GFP_KERNEL);
517 if (!addr)
518 goto free_pages;
519 bpage->page = (void *)addr;
520 rb_init_page(bpage->page);
521 }
522
523 list_splice(&pages, head);
524
525 rb_check_pages(cpu_buffer);
526
527 return 0;
528
529 free_pages:
530 list_for_each_entry_safe(bpage, tmp, &pages, list) {
531 list_del_init(&bpage->list);
532 free_buffer_page(bpage);
533 }
534 return -ENOMEM;
535 }
536
537 static struct ring_buffer_per_cpu *
538 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
539 {
540 struct ring_buffer_per_cpu *cpu_buffer;
541 struct buffer_page *bpage;
542 unsigned long addr;
543 int ret;
544
545 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
546 GFP_KERNEL, cpu_to_node(cpu));
547 if (!cpu_buffer)
548 return NULL;
549
550 cpu_buffer->cpu = cpu;
551 cpu_buffer->buffer = buffer;
552 spin_lock_init(&cpu_buffer->reader_lock);
553 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
554 INIT_LIST_HEAD(&cpu_buffer->pages);
555
556 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
557 GFP_KERNEL, cpu_to_node(cpu));
558 if (!bpage)
559 goto fail_free_buffer;
560
561 cpu_buffer->reader_page = bpage;
562 addr = __get_free_page(GFP_KERNEL);
563 if (!addr)
564 goto fail_free_reader;
565 bpage->page = (void *)addr;
566 rb_init_page(bpage->page);
567
568 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
569
570 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
571 if (ret < 0)
572 goto fail_free_reader;
573
574 cpu_buffer->head_page
575 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
576 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
577
578 return cpu_buffer;
579
580 fail_free_reader:
581 free_buffer_page(cpu_buffer->reader_page);
582
583 fail_free_buffer:
584 kfree(cpu_buffer);
585 return NULL;
586 }
587
588 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
589 {
590 struct list_head *head = &cpu_buffer->pages;
591 struct buffer_page *bpage, *tmp;
592
593 free_buffer_page(cpu_buffer->reader_page);
594
595 list_for_each_entry_safe(bpage, tmp, head, list) {
596 list_del_init(&bpage->list);
597 free_buffer_page(bpage);
598 }
599 kfree(cpu_buffer);
600 }
601
602 /*
603 * Causes a link error if the struct buffer_page gets bigger
604 * than the struct page.
605 */
606 extern int ring_buffer_page_too_big(void);
607
608 #ifdef CONFIG_HOTPLUG_CPU
609 static int rb_cpu_notify(struct notifier_block *self,
610 unsigned long action, void *hcpu);
611 #endif
612
613 /**
614 * ring_buffer_alloc - allocate a new ring_buffer
615 * @size: the size in bytes per cpu that is needed.
616 * @flags: attributes to set for the ring buffer.
617 *
618 * Currently the only flag that is available is the RB_FL_OVERWRITE
619 * flag. This flag means that the buffer will overwrite old data
620 * when the buffer wraps. If this flag is not set, the buffer will
621 * drop data when the tail hits the head.
622 */
623 struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
624 {
625 struct ring_buffer *buffer;
626 int bsize;
627 int cpu;
628
629 /* Paranoid! Optimizes out when all is well */
630 if (sizeof(struct buffer_page) > sizeof(struct page))
631 ring_buffer_page_too_big();
632
633
634 /* keep it in its own cache line */
635 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
636 GFP_KERNEL);
637 if (!buffer)
638 return NULL;
639
640 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
641 goto fail_free_buffer;
642
643 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
644 buffer->flags = flags;
645 buffer->clock = trace_clock_local;
646
647 /* need at least two pages */
648 if (buffer->pages == 1)
649 buffer->pages++;
650
651 /*
652 * Without CPU hotplug support, if the ring buffer is allocated in an
653 * early initcall, it will not be notified of secondary CPUs coming
654 * online. In that case, we need to allocate for all possible CPUs.
655 */
656 #ifdef CONFIG_HOTPLUG_CPU
657 get_online_cpus();
658 cpumask_copy(buffer->cpumask, cpu_online_mask);
659 #else
660 cpumask_copy(buffer->cpumask, cpu_possible_mask);
661 #endif
662 buffer->cpus = nr_cpu_ids;
663
664 bsize = sizeof(void *) * nr_cpu_ids;
665 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
666 GFP_KERNEL);
667 if (!buffer->buffers)
668 goto fail_free_cpumask;
669
670 for_each_buffer_cpu(buffer, cpu) {
671 buffer->buffers[cpu] =
672 rb_allocate_cpu_buffer(buffer, cpu);
673 if (!buffer->buffers[cpu])
674 goto fail_free_buffers;
675 }
676
677 #ifdef CONFIG_HOTPLUG_CPU
678 buffer->cpu_notify.notifier_call = rb_cpu_notify;
679 buffer->cpu_notify.priority = 0;
680 register_cpu_notifier(&buffer->cpu_notify);
681 #endif
682
683 put_online_cpus();
684 mutex_init(&buffer->mutex);
685
686 return buffer;
687
688 fail_free_buffers:
689 for_each_buffer_cpu(buffer, cpu) {
690 if (buffer->buffers[cpu])
691 rb_free_cpu_buffer(buffer->buffers[cpu]);
692 }
693 kfree(buffer->buffers);
694
695 fail_free_cpumask:
696 free_cpumask_var(buffer->cpumask);
697 put_online_cpus();
698
699 fail_free_buffer:
700 kfree(buffer);
701 return NULL;
702 }
703 EXPORT_SYMBOL_GPL(ring_buffer_alloc);
704
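/*
 * Illustrative sketch, not used below: allocate roughly one megabyte of
 * trace data per CPU, overwriting the oldest data on wrap, then tear the
 * buffer down again.
 */
static void __maybe_unused rb_example_alloc_free(void)
{
	struct ring_buffer *buffer;

	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
	if (!buffer)
		return;

	/* ... record into and read from the buffer here ... */

	ring_buffer_free(buffer);
}
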
705 /**
706 * ring_buffer_free - free a ring buffer.
707 * @buffer: the buffer to free.
708 */
709 void
710 ring_buffer_free(struct ring_buffer *buffer)
711 {
712 int cpu;
713
714 get_online_cpus();
715
716 #ifdef CONFIG_HOTPLUG_CPU
717 unregister_cpu_notifier(&buffer->cpu_notify);
718 #endif
719
720 for_each_buffer_cpu(buffer, cpu)
721 rb_free_cpu_buffer(buffer->buffers[cpu]);
722
723 put_online_cpus();
724
725 free_cpumask_var(buffer->cpumask);
726
727 kfree(buffer);
728 }
729 EXPORT_SYMBOL_GPL(ring_buffer_free);
730
731 void ring_buffer_set_clock(struct ring_buffer *buffer,
732 u64 (*clock)(void))
733 {
734 buffer->clock = clock;
735 }
736
737 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
738
739 static void
740 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
741 {
742 struct buffer_page *bpage;
743 struct list_head *p;
744 unsigned i;
745
746 atomic_inc(&cpu_buffer->record_disabled);
747 synchronize_sched();
748
749 for (i = 0; i < nr_pages; i++) {
750 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
751 return;
752 p = cpu_buffer->pages.next;
753 bpage = list_entry(p, struct buffer_page, list);
754 list_del_init(&bpage->list);
755 free_buffer_page(bpage);
756 }
757 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
758 return;
759
760 rb_reset_cpu(cpu_buffer);
761
762 rb_check_pages(cpu_buffer);
763
764 atomic_dec(&cpu_buffer->record_disabled);
765
766 }
767
768 static void
769 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
770 struct list_head *pages, unsigned nr_pages)
771 {
772 struct buffer_page *bpage;
773 struct list_head *p;
774 unsigned i;
775
776 atomic_inc(&cpu_buffer->record_disabled);
777 synchronize_sched();
778
779 for (i = 0; i < nr_pages; i++) {
780 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
781 return;
782 p = pages->next;
783 bpage = list_entry(p, struct buffer_page, list);
784 list_del_init(&bpage->list);
785 list_add_tail(&bpage->list, &cpu_buffer->pages);
786 }
787 rb_reset_cpu(cpu_buffer);
788
789 rb_check_pages(cpu_buffer);
790
791 atomic_dec(&cpu_buffer->record_disabled);
792 }
793
794 /**
795 * ring_buffer_resize - resize the ring buffer
796 * @buffer: the buffer to resize.
797 * @size: the new size.
798 *
799 * The tracer is responsible for making sure that the buffer is
800 * not being used while changing the size.
801 * Note: We may be able to change the above requirement by using
802 * RCU synchronizations.
803 *
804 * Minimum size is 2 * BUF_PAGE_SIZE.
805 *
806 * Returns -1 on failure.
807 */
808 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
809 {
810 struct ring_buffer_per_cpu *cpu_buffer;
811 unsigned nr_pages, rm_pages, new_pages;
812 struct buffer_page *bpage, *tmp;
813 unsigned long buffer_size;
814 unsigned long addr;
815 LIST_HEAD(pages);
816 int i, cpu;
817
818 /*
819 * Always succeed at resizing a non-existent buffer:
820 */
821 if (!buffer)
822 return size;
823
824 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
825 size *= BUF_PAGE_SIZE;
826 buffer_size = buffer->pages * BUF_PAGE_SIZE;
827
828 /* we need a minimum of two pages */
829 if (size < BUF_PAGE_SIZE * 2)
830 size = BUF_PAGE_SIZE * 2;
831
832 if (size == buffer_size)
833 return size;
834
835 mutex_lock(&buffer->mutex);
836 get_online_cpus();
837
838 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
839
840 if (size < buffer_size) {
841
842 /* easy case, just free pages */
843 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
844 goto out_fail;
845
846 rm_pages = buffer->pages - nr_pages;
847
848 for_each_buffer_cpu(buffer, cpu) {
849 cpu_buffer = buffer->buffers[cpu];
850 rb_remove_pages(cpu_buffer, rm_pages);
851 }
852 goto out;
853 }
854
855 /*
856 * This is a bit more difficult. We only want to add pages
857 * when we can allocate enough for all CPUs. We do this
858 * by allocating all the pages and storing them on a local
859 * linked list. If we succeed in our allocation, then we
860 * add these pages to the cpu_buffers. Otherwise we just free
861 * them all and return -ENOMEM;
862 */
863 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
864 goto out_fail;
865
866 new_pages = nr_pages - buffer->pages;
867
868 for_each_buffer_cpu(buffer, cpu) {
869 for (i = 0; i < new_pages; i++) {
870 bpage = kzalloc_node(ALIGN(sizeof(*bpage),
871 cache_line_size()),
872 GFP_KERNEL, cpu_to_node(cpu));
873 if (!bpage)
874 goto free_pages;
875 list_add(&bpage->list, &pages);
876 addr = __get_free_page(GFP_KERNEL);
877 if (!addr)
878 goto free_pages;
879 bpage->page = (void *)addr;
880 rb_init_page(bpage->page);
881 }
882 }
883
884 for_each_buffer_cpu(buffer, cpu) {
885 cpu_buffer = buffer->buffers[cpu];
886 rb_insert_pages(cpu_buffer, &pages, new_pages);
887 }
888
889 if (RB_WARN_ON(buffer, !list_empty(&pages)))
890 goto out_fail;
891
892 out:
893 buffer->pages = nr_pages;
894 put_online_cpus();
895 mutex_unlock(&buffer->mutex);
896
897 return size;
898
899 free_pages:
900 list_for_each_entry_safe(bpage, tmp, &pages, list) {
901 list_del_init(&bpage->list);
902 free_buffer_page(bpage);
903 }
904 put_online_cpus();
905 mutex_unlock(&buffer->mutex);
906 return -ENOMEM;
907
908 /*
909 * Something went totally wrong, and we are too paranoid
910 * to even clean up the mess.
911 */
912 out_fail:
913 put_online_cpus();
914 mutex_unlock(&buffer->mutex);
915 return -1;
916 }
917 EXPORT_SYMBOL_GPL(ring_buffer_resize);
918
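/*
 * Illustrative sketch, not used below: grow a buffer to eight pages worth
 * of data per CPU.  The caller must guarantee nothing is writing to the
 * buffer while the resize runs.
 */
static int __maybe_unused rb_example_grow(struct ring_buffer *buffer)
{
	int ret = ring_buffer_resize(buffer, 8 * BUF_PAGE_SIZE);

	return ret < 0 ? ret : 0;
}
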
919 static inline void *
920 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
921 {
922 return bpage->data + index;
923 }
924
925 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
926 {
927 return bpage->page->data + index;
928 }
929
930 static inline struct ring_buffer_event *
931 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
932 {
933 return __rb_page_index(cpu_buffer->reader_page,
934 cpu_buffer->reader_page->read);
935 }
936
937 static inline struct ring_buffer_event *
938 rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
939 {
940 return __rb_page_index(cpu_buffer->head_page,
941 cpu_buffer->head_page->read);
942 }
943
944 static inline struct ring_buffer_event *
945 rb_iter_head_event(struct ring_buffer_iter *iter)
946 {
947 return __rb_page_index(iter->head_page, iter->head);
948 }
949
950 static inline unsigned rb_page_write(struct buffer_page *bpage)
951 {
952 return local_read(&bpage->write);
953 }
954
955 static inline unsigned rb_page_commit(struct buffer_page *bpage)
956 {
957 return local_read(&bpage->page->commit);
958 }
959
960 /* Size is determined by what has been committed */
961 static inline unsigned rb_page_size(struct buffer_page *bpage)
962 {
963 return rb_page_commit(bpage);
964 }
965
966 static inline unsigned
967 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
968 {
969 return rb_page_commit(cpu_buffer->commit_page);
970 }
971
972 static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
973 {
974 return rb_page_commit(cpu_buffer->head_page);
975 }
976
977 /*
978 * When the tail hits the head and the buffer is in overwrite mode,
979 * the head jumps to the next page and all content on the previous
980 * page is discarded. But before doing so, we update the overrun
981 * variable of the buffer.
982 */
983 static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
984 {
985 struct ring_buffer_event *event;
986 unsigned long head;
987
988 for (head = 0; head < rb_head_size(cpu_buffer);
989 head += rb_event_length(event)) {
990
991 event = __rb_page_index(cpu_buffer->head_page, head);
992 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
993 return;
994 /* Only count data entries */
995 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
996 continue;
997 cpu_buffer->overrun++;
998 cpu_buffer->entries--;
999 }
1000 }
1001
1002 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
1003 struct buffer_page **bpage)
1004 {
1005 struct list_head *p = (*bpage)->list.next;
1006
1007 if (p == &cpu_buffer->pages)
1008 p = p->next;
1009
1010 *bpage = list_entry(p, struct buffer_page, list);
1011 }
1012
1013 static inline unsigned
1014 rb_event_index(struct ring_buffer_event *event)
1015 {
1016 unsigned long addr = (unsigned long)event;
1017
1018 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
1019 }
1020
1021 static int
1022 rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1023 struct ring_buffer_event *event)
1024 {
1025 unsigned long addr = (unsigned long)event;
1026 unsigned long index;
1027
1028 index = rb_event_index(event);
1029 addr &= PAGE_MASK;
1030
1031 return cpu_buffer->commit_page->page == (void *)addr &&
1032 rb_commit_index(cpu_buffer) == index;
1033 }
1034
1035 static void
1036 rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
1037 struct ring_buffer_event *event)
1038 {
1039 unsigned long addr = (unsigned long)event;
1040 unsigned long index;
1041
1042 index = rb_event_index(event);
1043 addr &= PAGE_MASK;
1044
1045 while (cpu_buffer->commit_page->page != (void *)addr) {
1046 if (RB_WARN_ON(cpu_buffer,
1047 cpu_buffer->commit_page == cpu_buffer->tail_page))
1048 return;
1049 cpu_buffer->commit_page->page->commit =
1050 cpu_buffer->commit_page->write;
1051 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1052 cpu_buffer->write_stamp =
1053 cpu_buffer->commit_page->page->time_stamp;
1054 }
1055
1056 /* Now set the commit to the event's index */
1057 local_set(&cpu_buffer->commit_page->page->commit, index);
1058 }
1059
1060 static void
1061 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1062 {
1063 /*
1064 * We only race with interrupts and NMIs on this CPU.
1065 * If we own the commit event, then we can commit
1066 * all others that interrupted us, since the interruptions
1067 * are in stack format (they finish before they come
1068 * back to us). This allows us to do a simple loop to
1069 * assign the commit to the tail.
1070 */
1071 again:
1072 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1073 cpu_buffer->commit_page->page->commit =
1074 cpu_buffer->commit_page->write;
1075 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1076 cpu_buffer->write_stamp =
1077 cpu_buffer->commit_page->page->time_stamp;
1078 /* add barrier to keep gcc from optimizing too much */
1079 barrier();
1080 }
1081 while (rb_commit_index(cpu_buffer) !=
1082 rb_page_write(cpu_buffer->commit_page)) {
1083 cpu_buffer->commit_page->page->commit =
1084 cpu_buffer->commit_page->write;
1085 barrier();
1086 }
1087
1088 /* again, keep gcc from optimizing */
1089 barrier();
1090
1091 /*
1092 * If an interrupt came in just after the first while loop
1093 * and pushed the tail page forward, we will be left with
1094 * a dangling commit that will never go forward.
1095 */
1096 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1097 goto again;
1098 }
1099
1100 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1101 {
1102 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1103 cpu_buffer->reader_page->read = 0;
1104 }
1105
1106 static void rb_inc_iter(struct ring_buffer_iter *iter)
1107 {
1108 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1109
1110 /*
1111 * The iterator could be on the reader page (it starts there).
1112 * But the head could have moved, since the reader was
1113 * found. Check for this case and assign the iterator
1114 * to the head page instead of next.
1115 */
1116 if (iter->head_page == cpu_buffer->reader_page)
1117 iter->head_page = cpu_buffer->head_page;
1118 else
1119 rb_inc_page(cpu_buffer, &iter->head_page);
1120
1121 iter->read_stamp = iter->head_page->page->time_stamp;
1122 iter->head = 0;
1123 }
1124
1125 /**
1126 * rb_update_event - update event type and data
1127 * @event: the event to update
1128 * @type: the type of event
1129 * @length: the size of the event field in the ring buffer
1130 *
1131 * Update the type and data fields of the event. The length
1132 * is the actual size that is written to the ring buffer,
1133 * and with this, we can determine what to place into the
1134 * data field.
1135 */
1136 static void
1137 rb_update_event(struct ring_buffer_event *event,
1138 unsigned type, unsigned length)
1139 {
1140 event->type_len = type;
1141
1142 switch (type) {
1143
1144 case RINGBUF_TYPE_PADDING:
1145 case RINGBUF_TYPE_TIME_EXTEND:
1146 case RINGBUF_TYPE_TIME_STAMP:
1147 break;
1148
1149 case 0:
1150 length -= RB_EVNT_HDR_SIZE;
1151 if (length > RB_MAX_SMALL_DATA)
1152 event->array[0] = length;
1153 else
1154 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
1155 break;
1156 default:
1157 BUG();
1158 }
1159 }
1160
1161 static unsigned rb_calculate_event_length(unsigned length)
1162 {
1163 struct ring_buffer_event event; /* Used only for sizeof array */
1164
1165 /* zero length can cause confusion */
1166 if (!length)
1167 length = 1;
1168
1169 if (length > RB_MAX_SMALL_DATA)
1170 length += sizeof(event.array[0]);
1171
1172 length += RB_EVNT_HDR_SIZE;
1173 length = ALIGN(length, RB_ALIGNMENT);
1174
1175 return length;
1176 }
1177
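/*
 * Worked example (illustrative): a request for 3 bytes of data becomes
 * ALIGN(3 + 4, 4) = 8 bytes on the buffer: a 4 byte header plus 4 bytes
 * of payload, with the length encoded in type_len.  A request for 200
 * bytes is larger than RB_MAX_SMALL_DATA, so 4 more bytes are reserved to
 * hold the length in array[0], giving ALIGN(200 + 4 + 4, 4) = 208 bytes.
 */
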
1178 static struct ring_buffer_event *
1179 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1180 unsigned type, unsigned long length, u64 *ts)
1181 {
1182 struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
1183 unsigned long tail, write;
1184 struct ring_buffer *buffer = cpu_buffer->buffer;
1185 struct ring_buffer_event *event;
1186 unsigned long flags;
1187 bool lock_taken = false;
1188
1189 commit_page = cpu_buffer->commit_page;
1190 /* we just need to protect against interrupts */
1191 barrier();
1192 tail_page = cpu_buffer->tail_page;
1193 write = local_add_return(length, &tail_page->write);
1194 tail = write - length;
1195
1196 /* See if we shot past the end of this buffer page */
1197 if (write > BUF_PAGE_SIZE) {
1198 struct buffer_page *next_page = tail_page;
1199
1200 local_irq_save(flags);
1201 /*
1202 * Since the write to the buffer is still not
1203 * fully lockless, we must be careful with NMIs.
1204 * The locks in the writers are taken when a write
1205 * crosses to a new page. The locks protect against
1206 * races with the readers (this will soon be fixed
1207 * with a lockless solution).
1208 *
1209 * Because we can not protect against NMIs, and we
1210 * want to keep traces reentrant, we need to manage
1211 * what happens when we are in an NMI.
1212 *
1213 * NMIs can happen after we take the lock.
1214 * If we are in an NMI, only take the lock
1215 * if it is not already taken. Otherwise
1216 * simply fail.
1217 */
1218 if (unlikely(in_nmi())) {
1219 if (!__raw_spin_trylock(&cpu_buffer->lock))
1220 goto out_reset;
1221 } else
1222 __raw_spin_lock(&cpu_buffer->lock);
1223
1224 lock_taken = true;
1225
1226 rb_inc_page(cpu_buffer, &next_page);
1227
1228 head_page = cpu_buffer->head_page;
1229 reader_page = cpu_buffer->reader_page;
1230
1231 /* we grabbed the lock before incrementing */
1232 if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
1233 goto out_reset;
1234
1235 /*
1236 * If for some reason, we had an interrupt storm that made
1237 * it all the way around the buffer, bail, and warn
1238 * about it.
1239 */
1240 if (unlikely(next_page == commit_page)) {
1241 /* This can easily happen on small ring buffers */
1242 WARN_ON_ONCE(buffer->pages > 2);
1243 goto out_reset;
1244 }
1245
1246 if (next_page == head_page) {
1247 if (!(buffer->flags & RB_FL_OVERWRITE))
1248 goto out_reset;
1249
1250 /* tail_page has not moved yet? */
1251 if (tail_page == cpu_buffer->tail_page) {
1252 /* count overflows */
1253 rb_update_overflow(cpu_buffer);
1254
1255 rb_inc_page(cpu_buffer, &head_page);
1256 cpu_buffer->head_page = head_page;
1257 cpu_buffer->head_page->read = 0;
1258 }
1259 }
1260
1261 /*
1262 * If the tail page is still the same as what we think
1263 * it is, then it is up to us to update the tail
1264 * pointer.
1265 */
1266 if (tail_page == cpu_buffer->tail_page) {
1267 local_set(&next_page->write, 0);
1268 local_set(&next_page->page->commit, 0);
1269 cpu_buffer->tail_page = next_page;
1270
1271 /* reread the time stamp */
1272 *ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
1273 cpu_buffer->tail_page->page->time_stamp = *ts;
1274 }
1275
1276 /*
1277 * The actual tail page has moved forward.
1278 */
1279 if (tail < BUF_PAGE_SIZE) {
1280 /* Mark the rest of the page with padding */
1281 event = __rb_page_index(tail_page, tail);
1282 rb_event_set_padding(event);
1283 }
1284
1285 if (tail <= BUF_PAGE_SIZE)
1286 /* Set the write back to the previous setting */
1287 local_set(&tail_page->write, tail);
1288
1289 /*
1290 * If this was a commit entry that failed,
1291 * increment that too
1292 */
1293 if (tail_page == cpu_buffer->commit_page &&
1294 tail == rb_commit_index(cpu_buffer)) {
1295 rb_set_commit_to_write(cpu_buffer);
1296 }
1297
1298 __raw_spin_unlock(&cpu_buffer->lock);
1299 local_irq_restore(flags);
1300
1301 /* fail and let the caller try again */
1302 return ERR_PTR(-EAGAIN);
1303 }
1304
1305 /* We reserved something on the buffer */
1306
1307 if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
1308 return NULL;
1309
1310 event = __rb_page_index(tail_page, tail);
1311 rb_update_event(event, type, length);
1312
1313 /*
1314 * If this is a commit and the tail is zero, then update
1315 * this page's time stamp.
1316 */
1317 if (!tail && rb_is_commit(cpu_buffer, event))
1318 cpu_buffer->commit_page->page->time_stamp = *ts;
1319
1320 return event;
1321
1322 out_reset:
1323 /* reset write */
1324 if (tail <= BUF_PAGE_SIZE)
1325 local_set(&tail_page->write, tail);
1326
1327 if (likely(lock_taken))
1328 __raw_spin_unlock(&cpu_buffer->lock);
1329 local_irq_restore(flags);
1330 return NULL;
1331 }
1332
1333 static int
1334 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1335 u64 *ts, u64 *delta)
1336 {
1337 struct ring_buffer_event *event;
1338 static int once;
1339 int ret;
1340
1341 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1342 printk(KERN_WARNING "Delta way too big! %llu"
1343 " ts=%llu write stamp = %llu\n",
1344 (unsigned long long)*delta,
1345 (unsigned long long)*ts,
1346 (unsigned long long)cpu_buffer->write_stamp);
1347 WARN_ON(1);
1348 }
1349
1350 /*
1351 * The delta is too big, we need to add a
1352 * new timestamp.
1353 */
1354 event = __rb_reserve_next(cpu_buffer,
1355 RINGBUF_TYPE_TIME_EXTEND,
1356 RB_LEN_TIME_EXTEND,
1357 ts);
1358 if (!event)
1359 return -EBUSY;
1360
1361 if (PTR_ERR(event) == -EAGAIN)
1362 return -EAGAIN;
1363
1364 /* Only a committed time event can update the write stamp */
1365 if (rb_is_commit(cpu_buffer, event)) {
1366 /*
1367 * If this is the first on the page, then we need to
1368 * update the page itself, and just put in a zero.
1369 */
1370 if (rb_event_index(event)) {
1371 event->time_delta = *delta & TS_MASK;
1372 event->array[0] = *delta >> TS_SHIFT;
1373 } else {
1374 cpu_buffer->commit_page->page->time_stamp = *ts;
1375 event->time_delta = 0;
1376 event->array[0] = 0;
1377 }
1378 cpu_buffer->write_stamp = *ts;
1379 /* let the caller know this was the commit */
1380 ret = 1;
1381 } else {
1382 /* Darn, this is just wasted space */
1383 event->time_delta = 0;
1384 event->array[0] = 0;
1385 ret = 0;
1386 }
1387
1388 *delta = 0;
1389
1390 return ret;
1391 }
1392
1393 static struct ring_buffer_event *
1394 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1395 unsigned type, unsigned long length)
1396 {
1397 struct ring_buffer_event *event;
1398 u64 ts, delta;
1399 int commit = 0;
1400 int nr_loops = 0;
1401
1402 again:
1403 /*
1404 * We allow for interrupts to reenter here and do a trace.
1405 * If one does, it will cause this original code to loop
1406 * back here. Even with heavy interrupts happening, this
1407 * should only happen a few times in a row. If this happens
1408 * 1000 times in a row, there must be either an interrupt
1409 * storm or we have something buggy.
1410 * Bail!
1411 */
1412 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1413 return NULL;
1414
1415 ts = ring_buffer_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
1416
1417 /*
1418 * Only the first commit can update the timestamp.
1419 * Yes there is a race here. If an interrupt comes in
1420 * just after the conditional and it traces too, then it
1421 * will also check the deltas. More than one timestamp may
1422 * also be made. But only the entry that did the actual
1423 * commit will be something other than zero.
1424 */
1425 if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1426 rb_page_write(cpu_buffer->tail_page) ==
1427 rb_commit_index(cpu_buffer)) {
1428
1429 delta = ts - cpu_buffer->write_stamp;
1430
1431 /* make sure this delta is calculated here */
1432 barrier();
1433
1434 /* Did the write stamp get updated already? */
1435 if (unlikely(ts < cpu_buffer->write_stamp))
1436 delta = 0;
1437
1438 if (test_time_stamp(delta)) {
1439
1440 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1441
1442 if (commit == -EBUSY)
1443 return NULL;
1444
1445 if (commit == -EAGAIN)
1446 goto again;
1447
1448 RB_WARN_ON(cpu_buffer, commit < 0);
1449 }
1450 } else
1451 /* Non commits have zero deltas */
1452 delta = 0;
1453
1454 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1455 if (PTR_ERR(event) == -EAGAIN)
1456 goto again;
1457
1458 if (!event) {
1459 if (unlikely(commit))
1460 /*
1461 * Ouch! We needed a timestamp and it was committed. But
1462 * we didn't get our event reserved.
1463 */
1464 rb_set_commit_to_write(cpu_buffer);
1465 return NULL;
1466 }
1467
1468 /*
1469 * If the timestamp was committed, make the commit our entry
1470 * now so that we will update it when needed.
1471 */
1472 if (commit)
1473 rb_set_commit_event(cpu_buffer, event);
1474 else if (!rb_is_commit(cpu_buffer, event))
1475 delta = 0;
1476
1477 event->time_delta = delta;
1478
1479 return event;
1480 }
1481
1482 #define TRACE_RECURSIVE_DEPTH 16
1483
1484 static int trace_recursive_lock(void)
1485 {
1486 current->trace_recursion++;
1487
1488 if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
1489 return 0;
1490
1491 /* Disable all tracing before we do anything else */
1492 tracing_off_permanent();
1493
1494 printk_once(KERN_WARNING "Tracing recursion: depth[%d]:"
1495 "HC[%lu]:SC[%lu]:NMI[%lu]\n",
1496 current->trace_recursion,
1497 hardirq_count() >> HARDIRQ_SHIFT,
1498 softirq_count() >> SOFTIRQ_SHIFT,
1499 in_nmi());
1500
1501 WARN_ON_ONCE(1);
1502 return -1;
1503 }
1504
1505 static void trace_recursive_unlock(void)
1506 {
1507 WARN_ON_ONCE(!current->trace_recursion);
1508
1509 current->trace_recursion--;
1510 }
1511
1512 static DEFINE_PER_CPU(int, rb_need_resched);
1513
1514 /**
1515 * ring_buffer_lock_reserve - reserve a part of the buffer
1516 * @buffer: the ring buffer to reserve from
1517 * @length: the length of the data to reserve (excluding event header)
1518 *
1519 * Returns a reserved event on the ring buffer to copy data directly into.
1520 * The user of this interface will need to get the body to write into
1521 * and can use the ring_buffer_event_data() interface.
1522 *
1523 * The length is the length of the data needed, not the event length
1524 * which also includes the event header.
1525 *
1526 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1527 * If NULL is returned, then nothing has been allocated or locked.
1528 */
1529 struct ring_buffer_event *
1530 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
1531 {
1532 struct ring_buffer_per_cpu *cpu_buffer;
1533 struct ring_buffer_event *event;
1534 int cpu, resched;
1535
1536 if (ring_buffer_flags != RB_BUFFERS_ON)
1537 return NULL;
1538
1539 if (atomic_read(&buffer->record_disabled))
1540 return NULL;
1541
1542 /* If we are tracing schedule, we don't want to recurse */
1543 resched = ftrace_preempt_disable();
1544
1545 if (trace_recursive_lock())
1546 goto out_nocheck;
1547
1548 cpu = raw_smp_processor_id();
1549
1550 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1551 goto out;
1552
1553 cpu_buffer = buffer->buffers[cpu];
1554
1555 if (atomic_read(&cpu_buffer->record_disabled))
1556 goto out;
1557
1558 length = rb_calculate_event_length(length);
1559 if (length > BUF_PAGE_SIZE)
1560 goto out;
1561
1562 event = rb_reserve_next_event(cpu_buffer, 0, length);
1563 if (!event)
1564 goto out;
1565
1566 /*
1567 * Need to store resched state on this cpu.
1568 * Only the first needs to.
1569 */
1570
1571 if (preempt_count() == 1)
1572 per_cpu(rb_need_resched, cpu) = resched;
1573
1574 return event;
1575
1576 out:
1577 trace_recursive_unlock();
1578
1579 out_nocheck:
1580 ftrace_preempt_enable(resched);
1581 return NULL;
1582 }
1583 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
1584
1585 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1586 struct ring_buffer_event *event)
1587 {
1588 cpu_buffer->entries++;
1589
1590 /* Only process further if we own the commit */
1591 if (!rb_is_commit(cpu_buffer, event))
1592 return;
1593
1594 cpu_buffer->write_stamp += event->time_delta;
1595
1596 rb_set_commit_to_write(cpu_buffer);
1597 }
1598
1599 /**
1600 * ring_buffer_unlock_commit - commit a reserved event
1601 * @buffer: The buffer to commit to
1602 * @event: The event pointer to commit.
1603 *
1604 * This commits the data to the ring buffer, and releases any locks held.
1605 *
1606 * Must be paired with ring_buffer_lock_reserve.
1607 */
1608 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1609 struct ring_buffer_event *event)
1610 {
1611 struct ring_buffer_per_cpu *cpu_buffer;
1612 int cpu = raw_smp_processor_id();
1613
1614 cpu_buffer = buffer->buffers[cpu];
1615
1616 rb_commit(cpu_buffer, event);
1617
1618 trace_recursive_unlock();
1619
1620 /*
1621 * Only the last preempt count needs to restore preemption.
1622 */
1623 if (preempt_count() == 1)
1624 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1625 else
1626 preempt_enable_no_resched_notrace();
1627
1628 return 0;
1629 }
1630 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
1631
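/*
 * Illustrative sketch, not used below, of the reserve/fill/commit cycle
 * described above.  "struct rb_example_entry" is a made-up record layout;
 * a real tracer defines its own.
 */
struct rb_example_entry {
	unsigned long	ip;
	unsigned long	parent_ip;
};

static int __maybe_unused rb_example_trace(struct ring_buffer *buffer,
					   unsigned long ip,
					   unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct rb_example_entry *entry;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return -EBUSY;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	/* releases whatever ring_buffer_lock_reserve() took */
	return ring_buffer_unlock_commit(buffer, event);
}
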
1632 static inline void rb_event_discard(struct ring_buffer_event *event)
1633 {
1634 /* array[0] holds the actual length for the discarded event */
1635 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
1636 event->type_len = RINGBUF_TYPE_PADDING;
1637 /* time delta must be non zero */
1638 if (!event->time_delta)
1639 event->time_delta = 1;
1640 }
1641
1642 /**
1643 * ring_buffer_event_discard - discard any event in the ring buffer
1644 * @event: the event to discard
1645 *
1646 * Sometimes an event that is in the ring buffer needs to be ignored.
1647 * This function lets the user discard an event in the ring buffer
1648 * and then that event will not be read later.
1649 *
1650 * Note, it is up to the user to be careful with this, and protect
1651 * against races. If the user discards an event that has been consumed
1652 * it is possible that it could corrupt the ring buffer.
1653 */
1654 void ring_buffer_event_discard(struct ring_buffer_event *event)
1655 {
1656 rb_event_discard(event);
1657 }
1658 EXPORT_SYMBOL_GPL(ring_buffer_event_discard);
1659
1660 /**
1661 * ring_buffer_discard_commit - discard an event that has not been committed
1662 * @buffer: the ring buffer
1663 * @event: non committed event to discard
1664 *
1665 * This is similar to ring_buffer_event_discard but must only be
1666 * performed on an event that has not been committed yet. The difference
1667 * is that this will also try to free the event from the ring buffer
1668 * if another event has not been added behind it.
1669 *
1670 * If another event has been added behind it, it will set the event
1671 * up as discarded, and perform the commit.
1672 *
1673 * If this function is called, do not call ring_buffer_unlock_commit on
1674 * the event.
1675 */
1676 void ring_buffer_discard_commit(struct ring_buffer *buffer,
1677 struct ring_buffer_event *event)
1678 {
1679 struct ring_buffer_per_cpu *cpu_buffer;
1680 unsigned long new_index, old_index;
1681 struct buffer_page *bpage;
1682 unsigned long index;
1683 unsigned long addr;
1684 int cpu;
1685
1686 /* The event is discarded regardless */
1687 rb_event_discard(event);
1688
1689 /*
1690 * This must only be called if the event has not been
1691 * committed yet. Thus we can assume that preemption
1692 * is still disabled.
1693 */
1694 RB_WARN_ON(buffer, !preempt_count());
1695
1696 cpu = smp_processor_id();
1697 cpu_buffer = buffer->buffers[cpu];
1698
1699 new_index = rb_event_index(event);
1700 old_index = new_index + rb_event_length(event);
1701 addr = (unsigned long)event;
1702 addr &= PAGE_MASK;
1703
1704 bpage = cpu_buffer->tail_page;
1705
1706 if (bpage == (void *)addr && rb_page_write(bpage) == old_index) {
1707 /*
1708 * This is on the tail page. It is possible that
1709 * a write could come in and move the tail page
1710 * and write to the next page. That is fine
1711 * because we just shorten what is on this page.
1712 */
1713 index = local_cmpxchg(&bpage->write, old_index, new_index);
1714 if (index == old_index)
1715 goto out;
1716 }
1717
1718 /*
1719 * The commit is still visible by the reader, so we
1720 * must increment entries.
1721 */
1722 cpu_buffer->entries++;
1723 out:
1724 /*
1725 * If a write came in and pushed the tail page
1726 * we still need to update the commit pointer
1727 * if we were the commit.
1728 */
1729 if (rb_is_commit(cpu_buffer, event))
1730 rb_set_commit_to_write(cpu_buffer);
1731
1732 trace_recursive_unlock();
1733
1734 /*
1735 * Only the last preempt count needs to restore preemption.
1736 */
1737 if (preempt_count() == 1)
1738 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1739 else
1740 preempt_enable_no_resched_notrace();
1741
1742 }
1743 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
1744
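/*
 * Illustrative sketch, not used below: reserve an event, then either
 * commit it or throw it away (for instance because a filter rejected it)
 * with ring_buffer_discard_commit().
 */
static void __maybe_unused rb_example_maybe_trace(struct ring_buffer *buffer,
						  int value, int keep)
{
	struct ring_buffer_event *event;
	int *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return;

	body = ring_buffer_event_data(event);
	*body = value;

	if (keep)
		ring_buffer_unlock_commit(buffer, event);
	else
		ring_buffer_discard_commit(buffer, event);
}
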
1745 /**
1746 * ring_buffer_write - write data to the buffer without reserving
1747 * @buffer: The ring buffer to write to.
1748 * @length: The length of the data being written (excluding the event header)
1749 * @data: The data to write to the buffer.
1750 *
1751 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1752 * one function. If you already have the data to write to the buffer, it
1753 * may be easier to simply call this function.
1754 *
1755 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1756 * and not the length of the event which would hold the header.
1757 */
1758 int ring_buffer_write(struct ring_buffer *buffer,
1759 unsigned long length,
1760 void *data)
1761 {
1762 struct ring_buffer_per_cpu *cpu_buffer;
1763 struct ring_buffer_event *event;
1764 unsigned long event_length;
1765 void *body;
1766 int ret = -EBUSY;
1767 int cpu, resched;
1768
1769 if (ring_buffer_flags != RB_BUFFERS_ON)
1770 return -EBUSY;
1771
1772 if (atomic_read(&buffer->record_disabled))
1773 return -EBUSY;
1774
1775 resched = ftrace_preempt_disable();
1776
1777 cpu = raw_smp_processor_id();
1778
1779 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1780 goto out;
1781
1782 cpu_buffer = buffer->buffers[cpu];
1783
1784 if (atomic_read(&cpu_buffer->record_disabled))
1785 goto out;
1786
1787 event_length = rb_calculate_event_length(length);
1788 event = rb_reserve_next_event(cpu_buffer, 0, event_length);
1789 if (!event)
1790 goto out;
1791
1792 body = rb_event_data(event);
1793
1794 memcpy(body, data, length);
1795
1796 rb_commit(cpu_buffer, event);
1797
1798 ret = 0;
1799 out:
1800 ftrace_preempt_enable(resched);
1801
1802 return ret;
1803 }
1804 EXPORT_SYMBOL_GPL(ring_buffer_write);
1805
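/*
 * Illustrative sketch, not used below: push an already prepared blob into
 * the buffer with a single call instead of the reserve/commit pair.
 */
static int __maybe_unused rb_example_write_blob(struct ring_buffer *buffer,
						void *blob, unsigned long len)
{
	return ring_buffer_write(buffer, len, blob);
}
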
1806 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1807 {
1808 struct buffer_page *reader = cpu_buffer->reader_page;
1809 struct buffer_page *head = cpu_buffer->head_page;
1810 struct buffer_page *commit = cpu_buffer->commit_page;
1811
1812 return reader->read == rb_page_commit(reader) &&
1813 (commit == reader ||
1814 (commit == head &&
1815 head->read == rb_page_commit(commit)));
1816 }
1817
1818 /**
1819 * ring_buffer_record_disable - stop all writes into the buffer
1820 * @buffer: The ring buffer to stop writes to.
1821 *
1822 * This prevents all writes to the buffer. Any attempt to write
1823 * to the buffer after this will fail and return NULL.
1824 *
1825 * The caller should call synchronize_sched() after this.
1826 */
1827 void ring_buffer_record_disable(struct ring_buffer *buffer)
1828 {
1829 atomic_inc(&buffer->record_disabled);
1830 }
1831 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
1832
1833 /**
1834 * ring_buffer_record_enable - enable writes to the buffer
1835 * @buffer: The ring buffer to enable writes
1836 *
1837 * Note, multiple disables will need the same number of enables
1838 * to truly enable the writing (much like preempt_disable).
1839 */
1840 void ring_buffer_record_enable(struct ring_buffer *buffer)
1841 {
1842 atomic_dec(&buffer->record_disabled);
1843 }
1844 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
1845
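/*
 * Illustrative sketch, not used below: stop all writers, wait for writers
 * already inside the buffer to finish (the synchronize_sched() asked for
 * above), work on the quiet buffer, then re-enable recording.
 */
static void __maybe_unused rb_example_quiesce(struct ring_buffer *buffer)
{
	ring_buffer_record_disable(buffer);
	synchronize_sched();

	/* ... safely read or reset the buffer here ... */

	ring_buffer_record_enable(buffer);
}
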
1846 /**
1847 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1848 * @buffer: The ring buffer to stop writes to.
1849 * @cpu: The CPU buffer to stop
1850 *
1851 * This prevents all writes to the buffer. Any attempt to write
1852 * to the buffer after this will fail and return NULL.
1853 *
1854 * The caller should call synchronize_sched() after this.
1855 */
1856 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1857 {
1858 struct ring_buffer_per_cpu *cpu_buffer;
1859
1860 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1861 return;
1862
1863 cpu_buffer = buffer->buffers[cpu];
1864 atomic_inc(&cpu_buffer->record_disabled);
1865 }
1866 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
1867
1868 /**
1869 * ring_buffer_record_enable_cpu - enable writes to the buffer
1870 * @buffer: The ring buffer to enable writes
1871 * @cpu: The CPU to enable.
1872 *
1873 * Note, multiple disables will need the same number of enables
1874 * to truly enable the writing (much like preempt_disable).
1875 */
1876 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1877 {
1878 struct ring_buffer_per_cpu *cpu_buffer;
1879
1880 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1881 return;
1882
1883 cpu_buffer = buffer->buffers[cpu];
1884 atomic_dec(&cpu_buffer->record_disabled);
1885 }
1886 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
1887
1888 /**
1889 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1890 * @buffer: The ring buffer
1891 * @cpu: The per CPU buffer to get the entries from.
1892 */
1893 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1894 {
1895 struct ring_buffer_per_cpu *cpu_buffer;
1896 unsigned long ret;
1897
1898 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1899 return 0;
1900
1901 cpu_buffer = buffer->buffers[cpu];
1902 ret = cpu_buffer->entries;
1903
1904 return ret;
1905 }
1906 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
1907
1908 /**
1909 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1910 * @buffer: The ring buffer
1911 * @cpu: The per CPU buffer to get the number of overruns from
1912 */
1913 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1914 {
1915 struct ring_buffer_per_cpu *cpu_buffer;
1916 unsigned long ret;
1917
1918 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1919 return 0;
1920
1921 cpu_buffer = buffer->buffers[cpu];
1922 ret = cpu_buffer->overrun;
1923
1924 return ret;
1925 }
1926 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
1927
1928 /**
1929 * ring_buffer_entries - get the number of entries in a buffer
1930 * @buffer: The ring buffer
1931 *
1932 * Returns the total number of entries in the ring buffer
1933 * (all CPU entries)
1934 */
1935 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1936 {
1937 struct ring_buffer_per_cpu *cpu_buffer;
1938 unsigned long entries = 0;
1939 int cpu;
1940
1941 /* if you care about this being correct, lock the buffer */
1942 for_each_buffer_cpu(buffer, cpu) {
1943 cpu_buffer = buffer->buffers[cpu];
1944 entries += cpu_buffer->entries;
1945 }
1946
1947 return entries;
1948 }
1949 EXPORT_SYMBOL_GPL(ring_buffer_entries);
1950
1951 /**
1952 * ring_buffer_overruns - get the total number of overruns in the buffer
1953 * @buffer: The ring buffer
1954 *
1955 * Returns the total number of overruns in the ring buffer
1956 * (all CPU entries)
1957 */
1958 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1959 {
1960 struct ring_buffer_per_cpu *cpu_buffer;
1961 unsigned long overruns = 0;
1962 int cpu;
1963
1964 /* if you care about this being correct, lock the buffer */
1965 for_each_buffer_cpu(buffer, cpu) {
1966 cpu_buffer = buffer->buffers[cpu];
1967 overruns += cpu_buffer->overrun;
1968 }
1969
1970 return overruns;
1971 }
1972 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
1973
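/*
 * Illustrative sketch, not used below: report how much was recorded and
 * how much was lost, summed over all CPUs.
 */
static void __maybe_unused rb_example_stats(struct ring_buffer *buffer)
{
	printk(KERN_DEBUG "ring buffer: %lu entries, %lu overruns\n",
	       ring_buffer_entries(buffer), ring_buffer_overruns(buffer));
}
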
1974 static void rb_iter_reset(struct ring_buffer_iter *iter)
1975 {
1976 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1977
1978 /* Iterator usage is expected to have record disabled */
1979 if (list_empty(&cpu_buffer->reader_page->list)) {
1980 iter->head_page = cpu_buffer->head_page;
1981 iter->head = cpu_buffer->head_page->read;
1982 } else {
1983 iter->head_page = cpu_buffer->reader_page;
1984 iter->head = cpu_buffer->reader_page->read;
1985 }
1986 if (iter->head)
1987 iter->read_stamp = cpu_buffer->read_stamp;
1988 else
1989 iter->read_stamp = iter->head_page->page->time_stamp;
1990 }
1991
1992 /**
1993 * ring_buffer_iter_reset - reset an iterator
1994 * @iter: The iterator to reset
1995 *
1996 * Resets the iterator, so that it will start from the beginning
1997 * again.
1998 */
1999 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2000 {
2001 struct ring_buffer_per_cpu *cpu_buffer;
2002 unsigned long flags;
2003
2004 if (!iter)
2005 return;
2006
2007 cpu_buffer = iter->cpu_buffer;
2008
2009 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2010 rb_iter_reset(iter);
2011 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2012 }
2013 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
2014
2015 /**
2016 * ring_buffer_iter_empty - check if an iterator has no more to read
2017 * @iter: The iterator to check
2018 */
2019 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
2020 {
2021 struct ring_buffer_per_cpu *cpu_buffer;
2022
2023 cpu_buffer = iter->cpu_buffer;
2024
2025 return iter->head_page == cpu_buffer->commit_page &&
2026 iter->head == rb_commit_index(cpu_buffer);
2027 }
2028 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
2029
2030 static void
2031 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2032 struct ring_buffer_event *event)
2033 {
2034 u64 delta;
2035
2036 switch (event->type_len) {
2037 case RINGBUF_TYPE_PADDING:
2038 return;
2039
2040 case RINGBUF_TYPE_TIME_EXTEND:
2041 delta = event->array[0];
2042 delta <<= TS_SHIFT;
2043 delta += event->time_delta;
2044 cpu_buffer->read_stamp += delta;
2045 return;
2046
2047 case RINGBUF_TYPE_TIME_STAMP:
2048 /* FIXME: not implemented */
2049 return;
2050
2051 case RINGBUF_TYPE_DATA:
2052 cpu_buffer->read_stamp += event->time_delta;
2053 return;
2054
2055 default:
2056 BUG();
2057 }
2058 return;
2059 }
2060
2061 static void
2062 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
2063 struct ring_buffer_event *event)
2064 {
2065 u64 delta;
2066
2067 switch (event->type_len) {
2068 case RINGBUF_TYPE_PADDING:
2069 return;
2070
2071 case RINGBUF_TYPE_TIME_EXTEND:
2072 delta = event->array[0];
2073 delta <<= TS_SHIFT;
2074 delta += event->time_delta;
2075 iter->read_stamp += delta;
2076 return;
2077
2078 case RINGBUF_TYPE_TIME_STAMP:
2079 /* FIXME: not implemented */
2080 return;
2081
2082 case RINGBUF_TYPE_DATA:
2083 iter->read_stamp += event->time_delta;
2084 return;
2085
2086 default:
2087 BUG();
2088 }
2089 return;
2090 }
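/*
 * A minimal sketch of the TIME_EXTEND decoding both helpers above
 * perform: the extend event carries the upper bits of the delta in
 * array[0] and the low TS_SHIFT bits in time_delta. Assuming TS_SHIFT
 * is 27 (the width of the compressed time_delta field), array[0] == 0x3
 * and time_delta == 0x5 decode to (0x3 << 27) + 0x5 == 0x18000005.
 * The helper name example_extend_delta is hypothetical.
 */
static u64 example_extend_delta(struct ring_buffer_event *event)
{
	u64 delta = event->array[0];

	delta <<= TS_SHIFT;
	delta += event->time_delta;
	return delta;
}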
2091
2092 static struct buffer_page *
2093 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2094 {
2095 struct buffer_page *reader = NULL;
2096 unsigned long flags;
2097 int nr_loops = 0;
2098
2099 local_irq_save(flags);
2100 __raw_spin_lock(&cpu_buffer->lock);
2101
2102 again:
2103 /*
2104 * This should normally only loop twice. But because the
2105 * start of the reader inserts an empty page, it causes
2106 * a case where we will loop three times. There should be no
2107 * reason to loop four times (that I know of).
2108 */
2109 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
2110 reader = NULL;
2111 goto out;
2112 }
2113
2114 reader = cpu_buffer->reader_page;
2115
2116 /* If there's more to read, return this page */
2117 if (cpu_buffer->reader_page->read < rb_page_size(reader))
2118 goto out;
2119
2120 /* Never should we have an index greater than the size */
2121 if (RB_WARN_ON(cpu_buffer,
2122 cpu_buffer->reader_page->read > rb_page_size(reader)))
2123 goto out;
2124
2125 /* check if we caught up to the tail */
2126 reader = NULL;
2127 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
2128 goto out;
2129
2130 /*
2131 * Splice the empty reader page into the list around the head.
2132 * Reset the reader page to size zero.
2133 */
2134
2135 reader = cpu_buffer->head_page;
2136 cpu_buffer->reader_page->list.next = reader->list.next;
2137 cpu_buffer->reader_page->list.prev = reader->list.prev;
2138
2139 local_set(&cpu_buffer->reader_page->write, 0);
2140 local_set(&cpu_buffer->reader_page->page->commit, 0);
2141
2142 /* Make the reader page now replace the head */
2143 reader->list.prev->next = &cpu_buffer->reader_page->list;
2144 reader->list.next->prev = &cpu_buffer->reader_page->list;
2145
2146 /*
2147 * If the tail is on the reader, then we must set the head
2148 * to the inserted page, otherwise we set it one before.
2149 */
2150 cpu_buffer->head_page = cpu_buffer->reader_page;
2151
2152 if (cpu_buffer->commit_page != reader)
2153 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2154
2155 /* Finally update the reader page to the new head */
2156 cpu_buffer->reader_page = reader;
2157 rb_reset_reader_page(cpu_buffer);
2158
2159 goto again;
2160
2161 out:
2162 __raw_spin_unlock(&cpu_buffer->lock);
2163 local_irq_restore(flags);
2164
2165 return reader;
2166 }
2167
2168 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2169 {
2170 struct ring_buffer_event *event;
2171 struct buffer_page *reader;
2172 unsigned length;
2173
2174 reader = rb_get_reader_page(cpu_buffer);
2175
2176 /* This function should not be called when buffer is empty */
2177 if (RB_WARN_ON(cpu_buffer, !reader))
2178 return;
2179
2180 event = rb_reader_event(cpu_buffer);
2181
2182 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
2183 || rb_discarded_event(event))
2184 cpu_buffer->entries--;
2185
2186 rb_update_read_stamp(cpu_buffer, event);
2187
2188 length = rb_event_length(event);
2189 cpu_buffer->reader_page->read += length;
2190 }
2191
2192 static void rb_advance_iter(struct ring_buffer_iter *iter)
2193 {
2194 struct ring_buffer *buffer;
2195 struct ring_buffer_per_cpu *cpu_buffer;
2196 struct ring_buffer_event *event;
2197 unsigned length;
2198
2199 cpu_buffer = iter->cpu_buffer;
2200 buffer = cpu_buffer->buffer;
2201
2202 /*
2203 * Check if we are at the end of the buffer.
2204 */
2205 if (iter->head >= rb_page_size(iter->head_page)) {
2206 if (RB_WARN_ON(buffer,
2207 iter->head_page == cpu_buffer->commit_page))
2208 return;
2209 rb_inc_iter(iter);
2210 return;
2211 }
2212
2213 event = rb_iter_head_event(iter);
2214
2215 length = rb_event_length(event);
2216
2217 /*
2218 * This should not be called to advance the head if we are
2219 * at the tail of the buffer.
2220 */
2221 if (RB_WARN_ON(cpu_buffer,
2222 (iter->head_page == cpu_buffer->commit_page) &&
2223 (iter->head + length > rb_commit_index(cpu_buffer))))
2224 return;
2225
2226 rb_update_iter_read_stamp(iter, event);
2227
2228 iter->head += length;
2229
2230 /* check for end of page padding */
2231 if ((iter->head >= rb_page_size(iter->head_page)) &&
2232 (iter->head_page != cpu_buffer->commit_page))
2233 rb_advance_iter(iter);
2234 }
2235
2236 static struct ring_buffer_event *
2237 rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2238 {
2239 struct ring_buffer_per_cpu *cpu_buffer;
2240 struct ring_buffer_event *event;
2241 struct buffer_page *reader;
2242 int nr_loops = 0;
2243
2244 cpu_buffer = buffer->buffers[cpu];
2245
2246 again:
2247 /*
2248 * We repeat when a timestamp is encountered. It is possible
2249 * to get multiple timestamps from an interrupt entering just
2250 * as one timestamp is about to be written. The max times
2251 * that this can happen is the number of nested interrupts we
2252 * can have. Nesting 10 deep of interrupts is clearly
2253 * an anomaly.
2254 */
2255 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
2256 return NULL;
2257
2258 reader = rb_get_reader_page(cpu_buffer);
2259 if (!reader)
2260 return NULL;
2261
2262 event = rb_reader_event(cpu_buffer);
2263
2264 switch (event->type_len) {
2265 case RINGBUF_TYPE_PADDING:
2266 if (rb_null_event(event))
2267 RB_WARN_ON(cpu_buffer, 1);
2268 /*
2269 * Because the writer could be discarding every
2270 * event it creates (which would probably be bad)
2271 * if we were to go back to "again" then we may never
2272 * catch up, and will trigger the warn on, or lock
2273 * the box. Return the padding, and we will release
2274 * the current locks, and try again.
2275 */
2276 rb_advance_reader(cpu_buffer);
2277 return event;
2278
2279 case RINGBUF_TYPE_TIME_EXTEND:
2280 /* Internal data, OK to advance */
2281 rb_advance_reader(cpu_buffer);
2282 goto again;
2283
2284 case RINGBUF_TYPE_TIME_STAMP:
2285 /* FIXME: not implemented */
2286 rb_advance_reader(cpu_buffer);
2287 goto again;
2288
2289 case RINGBUF_TYPE_DATA:
2290 if (ts) {
2291 *ts = cpu_buffer->read_stamp + event->time_delta;
2292 ring_buffer_normalize_time_stamp(buffer,
2293 cpu_buffer->cpu, ts);
2294 }
2295 return event;
2296
2297 default:
2298 BUG();
2299 }
2300
2301 return NULL;
2302 }
2303 EXPORT_SYMBOL_GPL(ring_buffer_peek);
2304
2305 static struct ring_buffer_event *
2306 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2307 {
2308 struct ring_buffer *buffer;
2309 struct ring_buffer_per_cpu *cpu_buffer;
2310 struct ring_buffer_event *event;
2311 int nr_loops = 0;
2312
2313 if (ring_buffer_iter_empty(iter))
2314 return NULL;
2315
2316 cpu_buffer = iter->cpu_buffer;
2317 buffer = cpu_buffer->buffer;
2318
2319 again:
2320 /*
2321 * We repeat when a timestamp is encountered. It is possible
2322 * to get multiple timestamps from an interrupt entering just
2323 * as one timestamp is about to be written. The max times
2324 * that this can happen is the number of nested interrupts we
2325 * can have. Nesting 10 deep of interrupts is clearly
2326 * an anomaly.
2327 */
2328 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
2329 return NULL;
2330
2331 if (rb_per_cpu_empty(cpu_buffer))
2332 return NULL;
2333
2334 event = rb_iter_head_event(iter);
2335
2336 switch (event->type_len) {
2337 case RINGBUF_TYPE_PADDING:
2338 if (rb_null_event(event)) {
2339 rb_inc_iter(iter);
2340 goto again;
2341 }
2342 rb_advance_iter(iter);
2343 return event;
2344
2345 case RINGBUF_TYPE_TIME_EXTEND:
2346 /* Internal data, OK to advance */
2347 rb_advance_iter(iter);
2348 goto again;
2349
2350 case RINGBUF_TYPE_TIME_STAMP:
2351 /* FIXME: not implemented */
2352 rb_advance_iter(iter);
2353 goto again;
2354
2355 case RINGBUF_TYPE_DATA:
2356 if (ts) {
2357 *ts = iter->read_stamp + event->time_delta;
2358 ring_buffer_normalize_time_stamp(buffer,
2359 cpu_buffer->cpu, ts);
2360 }
2361 return event;
2362
2363 default:
2364 BUG();
2365 }
2366
2367 return NULL;
2368 }
2369 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
2370
2371 /**
2372 * ring_buffer_peek - peek at the next event to be read
2373 * @buffer: The ring buffer to read
2374 * @cpu: The CPU to peek at
2375 * @ts: The timestamp counter of this event.
2376 *
2377 * This will return the event that will be read next, but does
2378 * not consume the data.
2379 */
2380 struct ring_buffer_event *
2381 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2382 {
2383 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2384 struct ring_buffer_event *event;
2385 unsigned long flags;
2386
2387 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2388 return NULL;
2389
2390 again:
2391 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2392 event = rb_buffer_peek(buffer, cpu, ts);
2393 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2394
2395 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2396 cpu_relax();
2397 goto again;
2398 }
2399
2400 return event;
2401 }
2402
2403 /**
2404 * ring_buffer_iter_peek - peek at the next event to be read
2405 * @iter: The ring buffer iterator
2406 * @ts: The timestamp counter of this event.
2407 *
2408 * This will return the event that will be read next, but does
2409 * not increment the iterator.
2410 */
2411 struct ring_buffer_event *
2412 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2413 {
2414 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2415 struct ring_buffer_event *event;
2416 unsigned long flags;
2417
2418 again:
2419 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2420 event = rb_iter_peek(iter, ts);
2421 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2422
2423 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2424 cpu_relax();
2425 goto again;
2426 }
2427
2428 return event;
2429 }
2430
2431 /**
2432 * ring_buffer_consume - return an event and consume it
2433 * @buffer: The ring buffer to get the next event from
2434 *
2435 * Returns the next event in the ring buffer, and that event is consumed.
2436 * Meaning that sequential reads will keep returning a different event,
2437 * and eventually empty the ring buffer if the producer is slower.
2438 */
2439 struct ring_buffer_event *
2440 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2441 {
2442 struct ring_buffer_per_cpu *cpu_buffer;
2443 struct ring_buffer_event *event = NULL;
2444 unsigned long flags;
2445
2446 again:
2447 /* might be called in atomic */
2448 preempt_disable();
2449
2450 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2451 goto out;
2452
2453 cpu_buffer = buffer->buffers[cpu];
2454 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2455
2456 event = rb_buffer_peek(buffer, cpu, ts);
2457 if (!event)
2458 goto out_unlock;
2459
2460 rb_advance_reader(cpu_buffer);
2461
2462 out_unlock:
2463 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2464
2465 out:
2466 preempt_enable();
2467
2468 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2469 cpu_relax();
2470 goto again;
2471 }
2472
2473 return event;
2474 }
2475 EXPORT_SYMBOL_GPL(ring_buffer_consume);
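/*
 * A minimal consuming-read sketch, assuming the caller only wants to
 * log the timestamp and length of each event. example_drain_cpu is a
 * hypothetical name; it relies only on ring_buffer_consume() and
 * ring_buffer_event_length(), both part of this file's API.
 */
static void example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
		printk(KERN_INFO "ts=%llu len=%u\n",
		       (unsigned long long)ts,
		       ring_buffer_event_length(event));
}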
2476
2477 /**
2478 * ring_buffer_read_start - start a non consuming read of the buffer
2479 * @buffer: The ring buffer to read from
2480 * @cpu: The cpu buffer to iterate over
2481 *
2482 * This starts up an iteration through the buffer. It also disables
2483 * the recording to the buffer until the reading is finished.
2484 * This prevents the reading from being corrupted. This is not
2485 * a consuming read, so a producer is not expected.
2486 *
2487 * Must be paired with ring_buffer_read_finish.
2488 */
2489 struct ring_buffer_iter *
2490 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2491 {
2492 struct ring_buffer_per_cpu *cpu_buffer;
2493 struct ring_buffer_iter *iter;
2494 unsigned long flags;
2495
2496 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2497 return NULL;
2498
2499 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2500 if (!iter)
2501 return NULL;
2502
2503 cpu_buffer = buffer->buffers[cpu];
2504
2505 iter->cpu_buffer = cpu_buffer;
2506
2507 atomic_inc(&cpu_buffer->record_disabled);
2508 synchronize_sched();
2509
2510 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2511 __raw_spin_lock(&cpu_buffer->lock);
2512 rb_iter_reset(iter);
2513 __raw_spin_unlock(&cpu_buffer->lock);
2514 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2515
2516 return iter;
2517 }
2518 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
2519
2520 /**
2521 * ring_buffer_finish - finish reading the iterator of the buffer
2522 * @iter: The iterator retrieved by ring_buffer_read_start
2523 *
2524 * This re-enables the recording to the buffer, and frees the
2525 * iterator.
2526 */
2527 void
2528 ring_buffer_read_finish(struct ring_buffer_iter *iter)
2529 {
2530 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2531
2532 atomic_dec(&cpu_buffer->record_disabled);
2533 kfree(iter);
2534 }
2535 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
2536
2537 /**
2538 * ring_buffer_read - read the next item in the ring buffer by the iterator
2539 * @iter: The ring buffer iterator
2540 * @ts: The time stamp of the event read.
2541 *
2542 * This reads the next event in the ring buffer and increments the iterator.
2543 */
2544 struct ring_buffer_event *
2545 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2546 {
2547 struct ring_buffer_event *event;
2548 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2549 unsigned long flags;
2550
2551 again:
2552 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2553 event = rb_iter_peek(iter, ts);
2554 if (!event)
2555 goto out;
2556
2557 rb_advance_iter(iter);
2558 out:
2559 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2560
2561 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2562 cpu_relax();
2563 goto again;
2564 }
2565
2566 return event;
2567 }
2568 EXPORT_SYMBOL_GPL(ring_buffer_read);
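/*
 * A minimal non-consuming read sketch using the iterator API above
 * (example_walk_cpu is a hypothetical name). Recording on the CPU
 * stays disabled between read_start and read_finish, as documented.
 */
static void example_walk_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	while ((event = ring_buffer_read(iter, &ts)))
		printk(KERN_INFO "ts=%llu len=%u\n",
		       (unsigned long long)ts,
		       ring_buffer_event_length(event));

	ring_buffer_read_finish(iter);
}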
2569
2570 /**
2571 * ring_buffer_size - return the size of the ring buffer (in bytes)
2572 * @buffer: The ring buffer.
2573 */
2574 unsigned long ring_buffer_size(struct ring_buffer *buffer)
2575 {
2576 return BUF_PAGE_SIZE * buffer->pages;
2577 }
2578 EXPORT_SYMBOL_GPL(ring_buffer_size);
2579
2580 static void
2581 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2582 {
2583 cpu_buffer->head_page
2584 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
2585 local_set(&cpu_buffer->head_page->write, 0);
2586 local_set(&cpu_buffer->head_page->page->commit, 0);
2587
2588 cpu_buffer->head_page->read = 0;
2589
2590 cpu_buffer->tail_page = cpu_buffer->head_page;
2591 cpu_buffer->commit_page = cpu_buffer->head_page;
2592
2593 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2594 local_set(&cpu_buffer->reader_page->write, 0);
2595 local_set(&cpu_buffer->reader_page->page->commit, 0);
2596 cpu_buffer->reader_page->read = 0;
2597
2598 cpu_buffer->overrun = 0;
2599 cpu_buffer->entries = 0;
2600
2601 cpu_buffer->write_stamp = 0;
2602 cpu_buffer->read_stamp = 0;
2603 }
2604
2605 /**
2606 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2607 * @buffer: The ring buffer to reset a per cpu buffer of
2608 * @cpu: The CPU buffer to be reset
2609 */
2610 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2611 {
2612 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2613 unsigned long flags;
2614
2615 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2616 return;
2617
2618 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2619
2620 __raw_spin_lock(&cpu_buffer->lock);
2621
2622 rb_reset_cpu(cpu_buffer);
2623
2624 __raw_spin_unlock(&cpu_buffer->lock);
2625
2626 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2627 }
2628 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
2629
2630 /**
2631 * ring_buffer_reset - reset a ring buffer
2632 * @buffer: The ring buffer to reset all cpu buffers
2633 */
2634 void ring_buffer_reset(struct ring_buffer *buffer)
2635 {
2636 int cpu;
2637
2638 for_each_buffer_cpu(buffer, cpu)
2639 ring_buffer_reset_cpu(buffer, cpu);
2640 }
2641 EXPORT_SYMBOL_GPL(ring_buffer_reset);
2642
2643 /**
2644 * ring_buffer_empty - is the ring buffer empty?
2645 * @buffer: The ring buffer to test
2646 */
2647 int ring_buffer_empty(struct ring_buffer *buffer)
2648 {
2649 struct ring_buffer_per_cpu *cpu_buffer;
2650 int cpu;
2651
2652 /* yes this is racy, but if you don't like the race, lock the buffer */
2653 for_each_buffer_cpu(buffer, cpu) {
2654 cpu_buffer = buffer->buffers[cpu];
2655 if (!rb_per_cpu_empty(cpu_buffer))
2656 return 0;
2657 }
2658
2659 return 1;
2660 }
2661 EXPORT_SYMBOL_GPL(ring_buffer_empty);
2662
2663 /**
2664 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2665 * @buffer: The ring buffer
2666 * @cpu: The CPU buffer to test
2667 */
2668 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2669 {
2670 struct ring_buffer_per_cpu *cpu_buffer;
2671 int ret;
2672
2673 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2674 return 1;
2675
2676 cpu_buffer = buffer->buffers[cpu];
2677 ret = rb_per_cpu_empty(cpu_buffer);
2678
2679
2680 return ret;
2681 }
2682 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
2683
2684 /**
2685 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2686 * @buffer_a: One buffer to swap with
2687 * @buffer_b: The other buffer to swap with
2688 *
2689 * This function is useful for tracers that want to take a "snapshot"
2690 * of a CPU buffer and have another backup buffer lying around.
2691 * It is expected that the tracer handles the cpu buffer not being
2692 * used at the moment.
2693 */
2694 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2695 struct ring_buffer *buffer_b, int cpu)
2696 {
2697 struct ring_buffer_per_cpu *cpu_buffer_a;
2698 struct ring_buffer_per_cpu *cpu_buffer_b;
2699 int ret = -EINVAL;
2700
2701 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
2702 !cpumask_test_cpu(cpu, buffer_b->cpumask))
2703 goto out;
2704
2705 /* At least make sure the two buffers are somewhat the same */
2706 if (buffer_a->pages != buffer_b->pages)
2707 goto out;
2708
2709 ret = -EAGAIN;
2710
2711 if (ring_buffer_flags != RB_BUFFERS_ON)
2712 goto out;
2713
2714 if (atomic_read(&buffer_a->record_disabled))
2715 goto out;
2716
2717 if (atomic_read(&buffer_b->record_disabled))
2718 goto out;
2719
2720 cpu_buffer_a = buffer_a->buffers[cpu];
2721 cpu_buffer_b = buffer_b->buffers[cpu];
2722
2723 if (atomic_read(&cpu_buffer_a->record_disabled))
2724 goto out;
2725
2726 if (atomic_read(&cpu_buffer_b->record_disabled))
2727 goto out;
2728
2729 /*
2730 * We can't do a synchronize_sched here because this
2731 * function can be called in atomic context.
2732 * Normally this will be called from the same CPU as cpu.
2733 * If not it's up to the caller to protect this.
2734 */
2735 atomic_inc(&cpu_buffer_a->record_disabled);
2736 atomic_inc(&cpu_buffer_b->record_disabled);
2737
2738 buffer_a->buffers[cpu] = cpu_buffer_b;
2739 buffer_b->buffers[cpu] = cpu_buffer_a;
2740
2741 cpu_buffer_b->buffer = buffer_a;
2742 cpu_buffer_a->buffer = buffer_b;
2743
2744 atomic_dec(&cpu_buffer_a->record_disabled);
2745 atomic_dec(&cpu_buffer_b->record_disabled);
2746
2747 ret = 0;
2748 out:
2749 return ret;
2750 }
2751 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
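/*
 * Snapshot sketch, assuming the tracer keeps a spare buffer of the
 * same size around (live_buffer, snap_buffer and the helper name are
 * hypothetical). On success the data that was in @cpu's live buffer
 * is now reachable through snap_buffer and can be read at leisure.
 */
static int example_snapshot_cpu(struct ring_buffer *live_buffer,
				struct ring_buffer *snap_buffer, int cpu)
{
	int ret;

	ret = ring_buffer_swap_cpu(live_buffer, snap_buffer, cpu);
	if (ret)
		return ret;	/* -EINVAL or -EAGAIN, as set above */

	return 0;
}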
2752
2753 static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
2754 struct buffer_data_page *bpage,
2755 unsigned int offset)
2756 {
2757 struct ring_buffer_event *event;
2758 unsigned long head;
2759
2760 __raw_spin_lock(&cpu_buffer->lock);
2761 for (head = offset; head < local_read(&bpage->commit);
2762 head += rb_event_length(event)) {
2763
2764 event = __rb_data_page_index(bpage, head);
2765 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
2766 return;
2767 /* Only count data entries */
2768 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
2769 continue;
2770 cpu_buffer->entries--;
2771 }
2772 __raw_spin_unlock(&cpu_buffer->lock);
2773 }
2774
2775 /**
2776 * ring_buffer_alloc_read_page - allocate a page to read from buffer
2777 * @buffer: the buffer to allocate for.
2778 *
2779 * This function is used in conjunction with ring_buffer_read_page.
2780 * When reading a full page from the ring buffer, these functions
2781 * can be used to speed up the process. The calling function should
2782 * allocate a few pages first with this function. Then when it
2783 * needs to get pages from the ring buffer, it passes the result
2784 * of this function into ring_buffer_read_page, which will swap
2785 * the page that was allocated, with the read page of the buffer.
2786 *
2787 * Returns:
2788 * The page allocated, or NULL on error.
2789 */
2790 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2791 {
2792 struct buffer_data_page *bpage;
2793 unsigned long addr;
2794
2795 addr = __get_free_page(GFP_KERNEL);
2796 if (!addr)
2797 return NULL;
2798
2799 bpage = (void *)addr;
2800
2801 rb_init_page(bpage);
2802
2803 return bpage;
2804 }
2805
2806 /**
2807 * ring_buffer_free_read_page - free an allocated read page
2808 * @buffer: the buffer the page was allocated for
2809 * @data: the page to free
2810 *
2811 * Free a page allocated from ring_buffer_alloc_read_page.
2812 */
2813 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2814 {
2815 free_page((unsigned long)data);
2816 }
2817
2818 /**
2819 * ring_buffer_read_page - extract a page from the ring buffer
2820 * @buffer: buffer to extract from
2821 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2822 * @len: amount to extract
2823 * @cpu: the cpu of the buffer to extract
2824 * @full: should the extraction only happen when the page is full.
2825 *
2826 * This function will pull out a page from the ring buffer and consume it.
2827 * @data_page must be the address of the variable that was returned
2828 * from ring_buffer_alloc_read_page. This is because the page might be used
2829 * to swap with a page in the ring buffer.
2830 *
2831 * for example:
2832 * rpage = ring_buffer_alloc_read_page(buffer);
2833 * if (!rpage)
2834 * return error;
2835 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
2836 * if (ret >= 0)
2837 * process_page(rpage, ret);
2838 *
2839 * When @full is set, the function will not return any data unless
2840 * the writer is off the reader page.
2841 *
2842 * Note: it is up to the calling functions to handle sleeps and wakeups.
2843 * The ring buffer can be used anywhere in the kernel and can not
2844 * blindly call wake_up. The layer that uses the ring buffer must be
2845 * responsible for that.
2846 *
2847 * Returns:
2848 * >=0 if data has been transferred, returns the offset of consumed data.
2849 * <0 if no data has been transferred.
2850 */
2851 int ring_buffer_read_page(struct ring_buffer *buffer,
2852 void **data_page, size_t len, int cpu, int full)
2853 {
2854 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2855 struct ring_buffer_event *event;
2856 struct buffer_data_page *bpage;
2857 struct buffer_page *reader;
2858 unsigned long flags;
2859 unsigned int commit;
2860 unsigned int read;
2861 u64 save_timestamp;
2862 int ret = -1;
2863
2864 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2865 goto out;
2866
2867 /*
2868 * If len is not big enough to hold the page header, then
2869 * we can not copy anything.
2870 */
2871 if (len <= BUF_PAGE_HDR_SIZE)
2872 goto out;
2873
2874 len -= BUF_PAGE_HDR_SIZE;
2875
2876 if (!data_page)
2877 goto out;
2878
2879 bpage = *data_page;
2880 if (!bpage)
2881 goto out;
2882
2883 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2884
2885 reader = rb_get_reader_page(cpu_buffer);
2886 if (!reader)
2887 goto out_unlock;
2888
2889 event = rb_reader_event(cpu_buffer);
2890
2891 read = reader->read;
2892 commit = rb_page_commit(reader);
2893
2894 /*
2895 * If this page has been partially read or
2896 * if len is not big enough to read the rest of the page or
2897 * a writer is still on the page, then
2898 * we must copy the data from the page to the buffer.
2899 * Otherwise, we can simply swap the page with the one passed in.
2900 */
2901 if (read || (len < (commit - read)) ||
2902 cpu_buffer->reader_page == cpu_buffer->commit_page) {
2903 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
2904 unsigned int rpos = read;
2905 unsigned int pos = 0;
2906 unsigned int size;
2907
2908 if (full)
2909 goto out_unlock;
2910
2911 if (len > (commit - read))
2912 len = (commit - read);
2913
2914 size = rb_event_length(event);
2915
2916 if (len < size)
2917 goto out_unlock;
2918
2919 /* save the current timestamp, since the user will need it */
2920 save_timestamp = cpu_buffer->read_stamp;
2921
2922 /* Need to copy one event at a time */
2923 do {
2924 memcpy(bpage->data + pos, rpage->data + rpos, size);
2925
2926 len -= size;
2927
2928 rb_advance_reader(cpu_buffer);
2929 rpos = reader->read;
2930 pos += size;
2931
2932 event = rb_reader_event(cpu_buffer);
2933 size = rb_event_length(event);
2934 } while (len > size);
2935
2936 /* update bpage */
2937 local_set(&bpage->commit, pos);
2938 bpage->time_stamp = save_timestamp;
2939
2940 /* we copied everything to the beginning */
2941 read = 0;
2942 } else {
2943 /* swap the pages */
2944 rb_init_page(bpage);
2945 bpage = reader->page;
2946 reader->page = *data_page;
2947 local_set(&reader->write, 0);
2948 reader->read = 0;
2949 *data_page = bpage;
2950
2951 /* update the entry counter */
2952 rb_remove_entries(cpu_buffer, bpage, read);
2953 }
2954 ret = read;
2955
2956 out_unlock:
2957 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2958
2959 out:
2960 return ret;
2961 }
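/*
 * A full alloc/read/free cycle sketch for the page interface above
 * (example_read_one_page is a hypothetical name). A real user, such
 * as a splice path, would typically hand the page off instead of
 * freeing it right away.
 */
static int example_read_one_page(struct ring_buffer *buffer, int cpu)
{
	void *page;
	int ret;

	page = ring_buffer_alloc_read_page(buffer);
	if (!page)
		return -ENOMEM;

	/* full == 0: accept a partially filled page as well */
	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);

	ring_buffer_free_read_page(buffer, page);
	return ret;
}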
2962
2963 static ssize_t
2964 rb_simple_read(struct file *filp, char __user *ubuf,
2965 size_t cnt, loff_t *ppos)
2966 {
2967 unsigned long *p = filp->private_data;
2968 char buf[64];
2969 int r;
2970
2971 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
2972 r = sprintf(buf, "permanently disabled\n");
2973 else
2974 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
2975
2976 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2977 }
2978
2979 static ssize_t
2980 rb_simple_write(struct file *filp, const char __user *ubuf,
2981 size_t cnt, loff_t *ppos)
2982 {
2983 unsigned long *p = filp->private_data;
2984 char buf[64];
2985 unsigned long val;
2986 int ret;
2987
2988 if (cnt >= sizeof(buf))
2989 return -EINVAL;
2990
2991 if (copy_from_user(&buf, ubuf, cnt))
2992 return -EFAULT;
2993
2994 buf[cnt] = 0;
2995
2996 ret = strict_strtoul(buf, 10, &val);
2997 if (ret < 0)
2998 return ret;
2999
3000 if (val)
3001 set_bit(RB_BUFFERS_ON_BIT, p);
3002 else
3003 clear_bit(RB_BUFFERS_ON_BIT, p);
3004
3005 (*ppos)++;
3006
3007 return cnt;
3008 }
3009
3010 static const struct file_operations rb_simple_fops = {
3011 .open = tracing_open_generic,
3012 .read = rb_simple_read,
3013 .write = rb_simple_write,
3014 };
3015
3016
3017 static __init int rb_init_debugfs(void)
3018 {
3019 struct dentry *d_tracer;
3020
3021 d_tracer = tracing_init_dentry();
3022
3023 trace_create_file("tracing_on", 0644, d_tracer,
3024 &ring_buffer_flags, &rb_simple_fops);
3025
3026 return 0;
3027 }
3028
3029 fs_initcall(rb_init_debugfs);
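/*
 * Userspace view of the control file created above (illustrative only;
 * the usual debugfs mount point /sys/kernel/debug/tracing is an
 * assumption about the local setup):
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on	clears RB_BUFFERS_ON_BIT
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on	sets RB_BUFFERS_ON_BIT
 *	cat /sys/kernel/debug/tracing/tracing_on	prints "0", "1", or
 *							"permanently disabled"
 */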
3030
3031 #ifdef CONFIG_HOTPLUG_CPU
3032 static int rb_cpu_notify(struct notifier_block *self,
3033 unsigned long action, void *hcpu)
3034 {
3035 struct ring_buffer *buffer =
3036 container_of(self, struct ring_buffer, cpu_notify);
3037 long cpu = (long)hcpu;
3038
3039 switch (action) {
3040 case CPU_UP_PREPARE:
3041 case CPU_UP_PREPARE_FROZEN:
3042 if (cpu_isset(cpu, *buffer->cpumask))
3043 return NOTIFY_OK;
3044
3045 buffer->buffers[cpu] =
3046 rb_allocate_cpu_buffer(buffer, cpu);
3047 if (!buffer->buffers[cpu]) {
3048 WARN(1, "failed to allocate ring buffer on CPU %ld\n",
3049 cpu);
3050 return NOTIFY_OK;
3051 }
3052 smp_wmb();
3053 cpu_set(cpu, *buffer->cpumask);
3054 break;
3055 case CPU_DOWN_PREPARE:
3056 case CPU_DOWN_PREPARE_FROZEN:
3057 /*
3058 * Do nothing.
3059 * If we were to free the buffer, then the user would
3060 * lose any trace that was in the buffer.
3061 */
3062 break;
3063 default:
3064 break;
3065 }
3066 return NOTIFY_OK;
3067 }
3068 #endif