kernel/trace/ring_buffer.c
bcea3f96 1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Generic ring buffer
4 *
5 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
6 */
28575c61 7#include <linux/trace_recursion.h>
af658dca 8#include <linux/trace_events.h>
7a8e76a3 9#include <linux/ring_buffer.h>
14131f2f 10#include <linux/trace_clock.h>
e6017571 11#include <linux/sched/clock.h>
0b07436d 12#include <linux/trace_seq.h>
7a8e76a3 13#include <linux/spinlock.h>
15693458 14#include <linux/irq_work.h>
a356646a 15#include <linux/security.h>
7a8e76a3 16#include <linux/uaccess.h>
a81bd80a 17#include <linux/hardirq.h>
6c43e554 18#include <linux/kthread.h> /* for self test */
19#include <linux/module.h>
20#include <linux/percpu.h>
21#include <linux/mutex.h>
6c43e554 22#include <linux/delay.h>
5a0e3ad6 23#include <linux/slab.h>
24#include <linux/init.h>
25#include <linux/hash.h>
26#include <linux/list.h>
554f786e 27#include <linux/cpu.h>
927e56db 28#include <linux/oom.h>
7a8e76a3 29
79615760 30#include <asm/local.h>
182e9f5f 31
32static void update_pages_handler(struct work_struct *work);
33
34/*
35 * The ring buffer header is special. We must manually keep it up to date.
36 */
37int ring_buffer_print_entry_header(struct trace_seq *s)
38{
39 trace_seq_puts(s, "# compressed entry header\n");
40 trace_seq_puts(s, "\ttype_len : 5 bits\n");
41 trace_seq_puts(s, "\ttime_delta : 27 bits\n");
42 trace_seq_puts(s, "\tarray : 32 bits\n");
43 trace_seq_putc(s, '\n');
44 trace_seq_printf(s, "\tpadding : type == %d\n",
45 RINGBUF_TYPE_PADDING);
46 trace_seq_printf(s, "\ttime_extend : type == %d\n",
47 RINGBUF_TYPE_TIME_EXTEND);
48 trace_seq_printf(s, "\ttime_stamp : type == %d\n",
49 RINGBUF_TYPE_TIME_STAMP);
50 trace_seq_printf(s, "\tdata max type_len == %d\n",
51 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
52
53 return !trace_seq_has_overflowed(s);
54}
55
56/*
57 * The ring buffer is made up of a list of pages. A separate list of pages is
58 * allocated for each CPU. A writer may only write to a buffer that is
59 * associated with the CPU it is currently executing on. A reader may read
60 * from any per cpu buffer.
61 *
62 * The reader is special. For each per cpu buffer, the reader has its own
63 * reader page. When a reader has read the entire reader page, this reader
64 * page is swapped with another page in the ring buffer.
65 *
66 * Now, as long as the writer is off the reader page, the reader can do
67 * whatever it wants with that page. The writer will never write to that page
68 * again (as long as it is out of the ring buffer).
69 *
70 * Here's some silly ASCII art.
71 *
72 * +------+
73 * |reader| RING BUFFER
74 * |page |
75 * +------+ +---+ +---+ +---+
76 * | |-->| |-->| |
77 * +---+ +---+ +---+
78 * ^ |
79 * | |
80 * +---------------+
81 *
82 *
83 * +------+
84 * |reader| RING BUFFER
85 * |page |------------------v
86 * +------+ +---+ +---+ +---+
87 * | |-->| |-->| |
88 * +---+ +---+ +---+
89 * ^ |
90 * | |
91 * +---------------+
92 *
93 *
94 * +------+
95 * |reader| RING BUFFER
96 * |page |------------------v
97 * +------+ +---+ +---+ +---+
98 * ^ | |-->| |-->| |
99 * | +---+ +---+ +---+
100 * | |
101 * | |
102 * +------------------------------+
103 *
104 *
105 * +------+
106 * |buffer| RING BUFFER
107 * |page |------------------v
108 * +------+ +---+ +---+ +---+
109 * ^ | | | |-->| |
110 * | New +---+ +---+ +---+
111 * | Reader------^ |
112 * | page |
113 * +------------------------------+
114 *
115 *
116 * After we make this swap, the reader can hand this page off to the splice
117 * code and be done with it. It can even allocate a new page if it needs to
118 * and swap that into the ring buffer.
119 *
120 * We will be using cmpxchg soon to make all this lockless.
121 *
122 */
123
124/* Used for individual buffers (after the counter) */
125#define RB_BUFFER_OFF (1 << 20)
a3583244 126
499e5470 127#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
033601a3 128
e3d6bf0a 129#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
67d34724 130#define RB_ALIGNMENT 4U
334d4169 131#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
c7b09308 132#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
133
134#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
135# define RB_FORCE_8BYTE_ALIGNMENT 0
136# define RB_ARCH_ALIGNMENT RB_ALIGNMENT
137#else
138# define RB_FORCE_8BYTE_ALIGNMENT 1
139# define RB_ARCH_ALIGNMENT 8U
140#endif
141
142#define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT)
649508f6 143
144/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
145#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
146
147enum {
148 RB_LEN_TIME_EXTEND = 8,
dc4e2801 149 RB_LEN_TIME_STAMP = 8,
150};
151
152#define skip_time_extend(event) \
153 ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
154
155#define extended_time(event) \
156 (event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
157
158static inline int rb_null_event(struct ring_buffer_event *event)
159{
a1863c21 160 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
161}
162
163static void rb_event_set_padding(struct ring_buffer_event *event)
164{
a1863c21 165 /* padding has a NULL time_delta */
334d4169 166 event->type_len = RINGBUF_TYPE_PADDING;
167 event->time_delta = 0;
168}
169
34a148bf 170static unsigned
2d622719 171rb_event_data_length(struct ring_buffer_event *event)
172{
173 unsigned length;
174
175 if (event->type_len)
176 length = event->type_len * RB_ALIGNMENT;
177 else
178 length = event->array[0];
179 return length + RB_EVNT_HDR_SIZE;
180}
181
182/*
183 * Return the length of the given event. Will return
184 * the length of the time extend if the event is a
185 * time extend.
186 */
187static inline unsigned
188rb_event_length(struct ring_buffer_event *event)
189{
334d4169 190 switch (event->type_len) {
7a8e76a3 191 case RINGBUF_TYPE_PADDING:
192 if (rb_null_event(event))
193 /* undefined */
194 return -1;
334d4169 195 return event->array[0] + RB_EVNT_HDR_SIZE;
196
197 case RINGBUF_TYPE_TIME_EXTEND:
198 return RB_LEN_TIME_EXTEND;
199
200 case RINGBUF_TYPE_TIME_STAMP:
201 return RB_LEN_TIME_STAMP;
202
203 case RINGBUF_TYPE_DATA:
2d622719 204 return rb_event_data_length(event);
7a8e76a3 205 default:
da4d401a 206 WARN_ON_ONCE(1);
207 }
208 /* not hit */
209 return 0;
210}
211
212/*
213 * Return total length of time extend and data,
214 * or just the event length for all other events.
215 */
216static inline unsigned
217rb_event_ts_length(struct ring_buffer_event *event)
218{
219 unsigned len = 0;
220
dc4e2801 221 if (extended_time(event)) {
222 /* time extends include the data event after it */
223 len = RB_LEN_TIME_EXTEND;
224 event = skip_time_extend(event);
225 }
226 return len + rb_event_length(event);
227}
228
229/**
230 * ring_buffer_event_length - return the length of the event
231 * @event: the event to get the length of
232 *
233 * Returns the size of the data load of a data event.
234 * If the event is something other than a data event, it
235 * returns the size of the event itself. With the exception
236 * of a TIME EXTEND, where it still returns the size of the
237 * data load of the data event after it.
238 */
239unsigned ring_buffer_event_length(struct ring_buffer_event *event)
240{
241 unsigned length;
242
dc4e2801 243 if (extended_time(event))
244 event = skip_time_extend(event);
245
246 length = rb_event_length(event);
334d4169 247 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
248 return length;
249 length -= RB_EVNT_HDR_SIZE;
250 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
251 length -= sizeof(event->array[0]);
252 return length;
7a8e76a3 253}
c4f50183 254EXPORT_SYMBOL_GPL(ring_buffer_event_length);
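/*
 * Illustrative sketch (not part of the upstream file): how a consumer
 * typically pairs the two helpers above with the consuming-read API
 * declared in include/linux/ring_buffer.h. The payload handling is
 * made up for the example.
 *
 *	u64 ts;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_consume(buffer, cpu, &ts, NULL);
 *	if (event) {
 *		void *payload = ring_buffer_event_data(event);
 *		unsigned int len = ring_buffer_event_length(event);
 *		process(payload, len);
 *	}
 *
 * Here "len" is the size of the data load, not of the whole event, and
 * "process()" stands in for whatever the caller does with the data.
 */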
255
256/* inline for ring buffer fast paths */
929ddbf3 257static __always_inline void *
258rb_event_data(struct ring_buffer_event *event)
259{
dc4e2801 260 if (extended_time(event))
69d1b839 261 event = skip_time_extend(event);
da4d401a 262 WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
7a8e76a3 263 /* If length is in len field, then array[0] has the data */
334d4169 264 if (event->type_len)
265 return (void *)&event->array[0];
266 /* Otherwise length is in array[0] and array[1] has the data */
267 return (void *)&event->array[1];
268}
269
270/**
271 * ring_buffer_event_data - return the data of the event
272 * @event: the event to get the data from
273 */
274void *ring_buffer_event_data(struct ring_buffer_event *event)
275{
276 return rb_event_data(event);
277}
c4f50183 278EXPORT_SYMBOL_GPL(ring_buffer_event_data);
279
280#define for_each_buffer_cpu(buffer, cpu) \
9e01c1b7 281 for_each_cpu(cpu, buffer->cpumask)
7a8e76a3 282
283#define for_each_online_buffer_cpu(buffer, cpu) \
284 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
285
286#define TS_SHIFT 27
287#define TS_MASK ((1ULL << TS_SHIFT) - 1)
288#define TS_DELTA_TEST (~TS_MASK)
289
290static u64 rb_event_time_stamp(struct ring_buffer_event *event)
291{
292 u64 ts;
293
294 ts = event->array[0];
295 ts <<= TS_SHIFT;
296 ts += event->time_delta;
297
298 return ts;
299}
300
301/* Flag when events were overwritten */
302#define RB_MISSED_EVENTS (1 << 31)
303/* Missed count stored at end */
304#define RB_MISSED_STORED (1 << 30)
66a8cb95 305
abc9b56d 306struct buffer_data_page {
e4c2ce82 307 u64 time_stamp; /* page time stamp */
c3706f00 308 local_t commit; /* write committed index */
649508f6 309 unsigned char data[] RB_ALIGN_DATA; /* data of buffer page */
310};
311
312/*
313 * Note, the buffer_page list must be first. The buffer pages
314 * are allocated in cache lines, which means that each buffer
315 * page will be at the beginning of a cache line, and thus
316 * the least significant bits will be zero. We use this to
317 * add flags in the list struct pointers, to make the ring buffer
318 * lockless.
319 */
abc9b56d 320struct buffer_page {
778c55d4 321 struct list_head list; /* list of buffer pages */
abc9b56d 322 local_t write; /* index for next write */
6f807acd 323 unsigned read; /* index for next read */
778c55d4 324 local_t entries; /* entries on this page */
ff0ff84a 325 unsigned long real_end; /* real end of data */
abc9b56d 326 struct buffer_data_page *page; /* Actual data page */
327};
328
329/*
330 * The buffer page counters, write and entries, must be reset
331 * atomically when crossing page boundaries. To synchronize this
332 * update, two counters are inserted into the number. One is
333 * the actual counter for the write position or count on the page.
334 *
335 * The other is a counter of updaters. Before an update happens
336 * the update partition of the counter is incremented. This will
337 * allow the updater to update the counter atomically.
338 *
339 * The counter is 20 bits, and the state data is 12.
340 */
341#define RB_WRITE_MASK 0xfffff
342#define RB_WRITE_INTCNT (1 << 20)
343
044fa782 344static void rb_init_page(struct buffer_data_page *bpage)
abc9b56d 345{
044fa782 346 local_set(&bpage->commit, 0);
347}
348
349/*
350 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
351 * this issue out.
352 */
34a148bf 353static void free_buffer_page(struct buffer_page *bpage)
ed56829c 354{
34a148bf 355 free_page((unsigned long)bpage->page);
e4c2ce82 356 kfree(bpage);
357}
358
359/*
360 * We need to fit the time_stamp delta into 27 bits.
361 */
362static inline int test_time_stamp(u64 delta)
363{
364 if (delta & TS_DELTA_TEST)
365 return 1;
366 return 0;
367}
368
474d32b6 369#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
7a8e76a3 370
371/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
372#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
373
374int ring_buffer_print_page_header(struct trace_seq *s)
375{
376 struct buffer_data_page field;
377
378 trace_seq_printf(s, "\tfield: u64 timestamp;\t"
379 "offset:0;\tsize:%u;\tsigned:%u;\n",
380 (unsigned int)sizeof(field.time_stamp),
381 (unsigned int)is_signed_type(u64));
382
383 trace_seq_printf(s, "\tfield: local_t commit;\t"
384 "offset:%u;\tsize:%u;\tsigned:%u;\n",
385 (unsigned int)offsetof(typeof(field), commit),
386 (unsigned int)sizeof(field.commit),
387 (unsigned int)is_signed_type(long));
388
389 trace_seq_printf(s, "\tfield: int overwrite;\t"
390 "offset:%u;\tsize:%u;\tsigned:%u;\n",
391 (unsigned int)offsetof(typeof(field), commit),
392 1,
393 (unsigned int)is_signed_type(long));
394
395 trace_seq_printf(s, "\tfield: char data;\t"
396 "offset:%u;\tsize:%u;\tsigned:%u;\n",
397 (unsigned int)offsetof(typeof(field), data),
398 (unsigned int)BUF_PAGE_SIZE,
399 (unsigned int)is_signed_type(char));
400
401 return !trace_seq_has_overflowed(s);
402}
403
404struct rb_irq_work {
405 struct irq_work work;
406 wait_queue_head_t waiters;
1e0d6714 407 wait_queue_head_t full_waiters;
15693458 408 bool waiters_pending;
409 bool full_waiters_pending;
410 bool wakeup_full;
411};
412
413/*
414 * Structure to hold event state and handle nested events.
415 */
416struct rb_event_info {
417 u64 ts;
418 u64 delta;
419 u64 before;
420 u64 after;
421 unsigned long length;
422 struct buffer_page *tail_page;
423 int add_timestamp;
424};
425
426/*
427 * Used for the add_timestamp
428 * NONE
429 * EXTEND - wants a time extend
430 * ABSOLUTE - the buffer requests all events to have absolute time stamps
431 * FORCE - force a full time stamp.
432 */
433enum {
434 RB_ADD_STAMP_NONE = 0,
435 RB_ADD_STAMP_EXTEND = BIT(1),
436 RB_ADD_STAMP_ABSOLUTE = BIT(2),
437 RB_ADD_STAMP_FORCE = BIT(3)
a389d86f 438};
439/*
440 * Used for which event context the event is in.
441 * TRANSITION = 0
442 * NMI = 1
443 * IRQ = 2
444 * SOFTIRQ = 3
445 * NORMAL = 4
446 *
447 * See trace_recursive_lock() comment below for more details.
448 */
449enum {
b02414c8 450 RB_CTX_TRANSITION,
451 RB_CTX_NMI,
452 RB_CTX_IRQ,
453 RB_CTX_SOFTIRQ,
454 RB_CTX_NORMAL,
455 RB_CTX_MAX
456};
457
458#if BITS_PER_LONG == 32
459#define RB_TIME_32
460#endif
461
462/* To test on 64 bit machines */
463//#define RB_TIME_32
464
465#ifdef RB_TIME_32
466
467struct rb_time_struct {
468 local_t cnt;
469 local_t top;
470 local_t bottom;
471};
472#else
473#include <asm/local64.h>
474struct rb_time_struct {
475 local64_t time;
476};
477#endif
478typedef struct rb_time_struct rb_time_t;
479
480#define MAX_NEST 5
481
482/*
483 * head_page == tail_page && head == tail then buffer is empty.
484 */
485struct ring_buffer_per_cpu {
486 int cpu;
985023de 487 atomic_t record_disabled;
07b8b10e 488 atomic_t resize_disabled;
13292494 489 struct trace_buffer *buffer;
5389f6fa 490 raw_spinlock_t reader_lock; /* serialize readers */
445c8951 491 arch_spinlock_t lock;
7a8e76a3 492 struct lock_class_key lock_key;
73a757e6 493 struct buffer_data_page *free_page;
9b94a8fb 494 unsigned long nr_pages;
58a09ec6 495 unsigned int current_context;
3adc54fa 496 struct list_head *pages;
497 struct buffer_page *head_page; /* read from head */
498 struct buffer_page *tail_page; /* write to tail */
c3706f00 499 struct buffer_page *commit_page; /* committed pages */
d769041f 500 struct buffer_page *reader_page;
501 unsigned long lost_events;
502 unsigned long last_overrun;
8e012066 503 unsigned long nest;
c64e148a 504 local_t entries_bytes;
e4906eff 505 local_t entries;
506 local_t overrun;
507 local_t commit_overrun;
508 local_t dropped_events;
509 local_t committing;
510 local_t commits;
511 local_t pages_touched;
512 local_t pages_read;
03329f99 513 long last_pages_touch;
2c2b0a78 514 size_t shortest_full;
77ae365e 515 unsigned long read;
c64e148a 516 unsigned long read_bytes;
517 rb_time_t write_stamp;
518 rb_time_t before_stamp;
8672e494 519 u64 event_stamp[MAX_NEST];
7a8e76a3 520 u64 read_stamp;
438ced17 521 /* ring buffer pages to update, > 0 to add, < 0 to remove */
9b94a8fb 522 long nr_pages_to_update;
438ced17 523 struct list_head new_pages; /* new pages to add */
83f40318 524 struct work_struct update_pages_work;
05fdd70d 525 struct completion update_done;
526
527 struct rb_irq_work irq_work;
528};
529
13292494 530struct trace_buffer {
531 unsigned flags;
532 int cpus;
7a8e76a3 533 atomic_t record_disabled;
00f62f61 534 cpumask_var_t cpumask;
7a8e76a3 535
536 struct lock_class_key *reader_lock_key;
537
538 struct mutex mutex;
539
540 struct ring_buffer_per_cpu **buffers;
554f786e 541
b32614c0 542 struct hlist_node node;
37886f6a 543 u64 (*clock)(void);
544
545 struct rb_irq_work irq_work;
00b41452 546 bool time_stamp_abs;
547};
548
549struct ring_buffer_iter {
550 struct ring_buffer_per_cpu *cpu_buffer;
551 unsigned long head;
785888c5 552 unsigned long next_event;
7a8e76a3 553 struct buffer_page *head_page;
554 struct buffer_page *cache_reader_page;
555 unsigned long cache_read;
7a8e76a3 556 u64 read_stamp;
28e3fc56 557 u64 page_stamp;
785888c5 558 struct ring_buffer_event *event;
c9b7a4a7 559 int missed_events;
560};
561
562#ifdef RB_TIME_32
563
564/*
565 * On 32 bit machines, local64_t is very expensive. As the ring
566 * buffer doesn't need all the features of a true 64 bit atomic,
567 * on 32 bit, it uses these functions (64 still uses local64_t).
568 *
569 * For the ring buffer, 64 bit required operations for the time is
570 * the following:
571 *
572 * - Only need 59 bits (uses 60 to make it even).
573 * - Reads may fail if it interrupted a modification of the time stamp.
574 * It will succeed if it did not interrupt another write even if
575 * the read itself is interrupted by a write.
576 * It returns whether it was successful or not.
577 *
578 * - Writes always succeed and will overwrite other writes and writes
579 * that were done by events interrupting the current write.
580 *
581 * - A write followed by a read of the same time stamp will always succeed,
582 * but may not contain the same value.
583 *
584 * - A cmpxchg will fail if it interrupted another write or cmpxchg.
585 * Other than that, it acts like a normal cmpxchg.
586 *
587 * The 60 bit time stamp is broken up by 30 bits in a top and bottom half
588 * (bottom being the least significant 30 bits of the 60 bit time stamp).
589 *
590 * The two most significant bits of each half holds a 2 bit counter (0-3).
591 * Each update will increment this counter by one.
592 * When reading the top and bottom, if the two counter bits match then the
593 * top and bottom together make a valid 60 bit number.
594 */
595#define RB_TIME_SHIFT 30
596#define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
597
598static inline int rb_time_cnt(unsigned long val)
599{
600 return (val >> RB_TIME_SHIFT) & 3;
601}
602
603static inline u64 rb_time_val(unsigned long top, unsigned long bottom)
604{
605 u64 val;
606
607 val = top & RB_TIME_VAL_MASK;
608 val <<= RB_TIME_SHIFT;
609 val |= bottom & RB_TIME_VAL_MASK;
610
611 return val;
612}
613
614static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
615{
616 unsigned long top, bottom;
617 unsigned long c;
618
619 /*
620 * If the read is interrupted by a write, then the cnt will
621 * be different. Loop until both top and bottom have been read
622 * without interruption.
623 */
624 do {
625 c = local_read(&t->cnt);
626 top = local_read(&t->top);
627 bottom = local_read(&t->bottom);
628 } while (c != local_read(&t->cnt));
629
630 *cnt = rb_time_cnt(top);
631
632 /* If top and bottom counts don't match, this interrupted a write */
633 if (*cnt != rb_time_cnt(bottom))
634 return false;
635
636 *ret = rb_time_val(top, bottom);
637 return true;
638}
639
640static bool rb_time_read(rb_time_t *t, u64 *ret)
641{
642 unsigned long cnt;
643
644 return __rb_time_read(t, ret, &cnt);
645}
646
647static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt)
648{
649 return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT);
650}
651
652static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom)
653{
654 *top = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
655 *bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
656}
657
658static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long cnt)
659{
660 val = rb_time_val_cnt(val, cnt);
661 local_set(t, val);
662}
663
664static void rb_time_set(rb_time_t *t, u64 val)
665{
666 unsigned long cnt, top, bottom;
667
668 rb_time_split(val, &top, &bottom);
669
670 /* Writes always succeed with a valid number even if it gets interrupted. */
671 do {
672 cnt = local_inc_return(&t->cnt);
673 rb_time_val_set(&t->top, top, cnt);
674 rb_time_val_set(&t->bottom, bottom, cnt);
675 } while (cnt != local_read(&t->cnt));
676}
677
678static inline bool
679rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
680{
681 unsigned long ret;
682
683 ret = local_cmpxchg(l, expect, set);
684 return ret == expect;
685}
686
687static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
688{
689 unsigned long cnt, top, bottom;
690 unsigned long cnt2, top2, bottom2;
691 u64 val;
692
693 /* The cmpxchg always fails if it interrupted an update */
694 if (!__rb_time_read(t, &val, &cnt2))
695 return false;
696
697 if (val != expect)
698 return false;
699
700 cnt = local_read(&t->cnt);
701 if ((cnt & 3) != cnt2)
702 return false;
703
704 cnt2 = cnt + 1;
705
706 rb_time_split(val, &top, &bottom);
707 top = rb_time_val_cnt(top, cnt);
708 bottom = rb_time_val_cnt(bottom, cnt);
709
710 rb_time_split(set, &top2, &bottom2);
711 top2 = rb_time_val_cnt(top2, cnt2);
712 bottom2 = rb_time_val_cnt(bottom2, cnt2);
713
714 if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2))
715 return false;
716 if (!rb_time_read_cmpxchg(&t->top, top, top2))
717 return false;
718 if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2))
719 return false;
720 return true;
721}
722
723#else /* 64 bits */
724
725/* local64_t always succeeds */
726
727static inline bool rb_time_read(rb_time_t *t, u64 *ret)
728{
729 *ret = local64_read(&t->time);
730 return true;
731}
732static void rb_time_set(rb_time_t *t, u64 val)
733{
734 local64_set(&t->time, val);
735}
736
737static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
738{
739 u64 val;
740 val = local64_cmpxchg(&t->time, expect, set);
741 return val == expect;
742}
743#endif
744
745/*
746 * Enable this to make sure that the event passed to
747 * ring_buffer_event_time_stamp() is not committed and also
748 * is on the buffer that it passed in.
749 */
750//#define RB_VERIFY_EVENT
751#ifdef RB_VERIFY_EVENT
752static struct list_head *rb_list_head(struct list_head *list);
753static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
754 void *event)
755{
756 struct buffer_page *page = cpu_buffer->commit_page;
757 struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
758 struct list_head *next;
759 long commit, write;
760 unsigned long addr = (unsigned long)event;
761 bool done = false;
762 int stop = 0;
763
764 /* Make sure the event exists and is not committed yet */
765 do {
766 if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
767 done = true;
768 commit = local_read(&page->page->commit);
769 write = local_read(&page->write);
770 if (addr >= (unsigned long)&page->page->data[commit] &&
771 addr < (unsigned long)&page->page->data[write])
772 return;
773
774 next = rb_list_head(page->list.next);
775 page = list_entry(next, struct buffer_page, list);
776 } while (!done);
777 WARN_ON_ONCE(1);
778}
779#else
780static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
781 void *event)
782{
783}
784#endif
785
786
787static inline u64 rb_time_stamp(struct trace_buffer *buffer);
788
789/**
790 * ring_buffer_event_time_stamp - return the event's current time stamp
791 * @buffer: The buffer that the event is on
792 * @event: the event to get the time stamp of
793 *
794 * Note, this must be called after @event is reserved, and before it is
795 * committed to the ring buffer. And must be called from the same
796 * context where the event was reserved (normal, softirq, irq, etc).
797 *
798 * Returns the time stamp associated with the current event.
799 * If the event has an extended time stamp, then that is used as
800 * the time stamp to return.
801 * In the highly unlikely case that the event was nested more than
802 * the max nesting, then the write_stamp of the buffer is returned,
803 * otherwise the current time is returned; but really, neither of
804 * the last two cases should ever happen.
805 */
806u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
807 struct ring_buffer_event *event)
808{
809 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
810 unsigned int nest;
811 u64 ts;
812
813 /* If the event includes an absolute time, then just use that */
814 if (event->type_len == RINGBUF_TYPE_TIME_STAMP)
815 return rb_event_time_stamp(event);
816
817 nest = local_read(&cpu_buffer->committing);
818 verify_event(cpu_buffer, event);
819 if (WARN_ON_ONCE(!nest))
820 goto fail;
821
efe6196a 822 /* Read the current saved nesting level time stamp */
a948c69d 823 if (likely(--nest < MAX_NEST))
824 return cpu_buffer->event_stamp[nest];
825
826 /* Shouldn't happen, warn if it does */
827 WARN_ONCE(1, "nest (%d) greater than max", nest);
efe6196a 828
a948c69d 829 fail:
830 /* Can only fail on 32 bit */
831 if (!rb_time_read(&cpu_buffer->write_stamp, &ts))
832 /* Screw it, just read the current time */
833 ts = rb_time_stamp(cpu_buffer->buffer);
834
835 return ts;
836}
837
838/**
839 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
840 * @buffer: The ring_buffer to get the number of pages from
841 * @cpu: The cpu of the ring_buffer to get the number of pages from
842 *
843 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
844 */
13292494 845size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
846{
847 return buffer->buffers[cpu]->nr_pages;
848}
849
850/**
851 * ring_buffer_nr_pages_dirty - get the number of used pages in the ring buffer
852 * @buffer: The ring_buffer to get the number of pages from
853 * @cpu: The cpu of the ring_buffer to get the number of pages from
854 *
855 * Returns the number of pages that have content in the ring buffer.
856 */
13292494 857size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
858{
859 size_t read;
860 size_t cnt;
861
862 read = local_read(&buffer->buffers[cpu]->pages_read);
863 cnt = local_read(&buffer->buffers[cpu]->pages_touched);
864 /* The reader can read an empty page, but not more than that */
865 if (cnt < read) {
866 WARN_ON_ONCE(read > cnt + 1);
867 return 0;
868 }
869
870 return cnt - read;
871}
872
873/*
874 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
875 *
876 * Schedules a delayed work to wake up any task that is blocked on the
877 * ring buffer waiters queue.
878 */
879static void rb_wake_up_waiters(struct irq_work *work)
880{
881 struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
882
883 wake_up_all(&rbwork->waiters);
884 if (rbwork->wakeup_full) {
885 rbwork->wakeup_full = false;
886 wake_up_all(&rbwork->full_waiters);
887 }
888}
889
890/**
891 * ring_buffer_wait - wait for input to the ring buffer
892 * @buffer: buffer to wait on
893 * @cpu: the cpu buffer to wait on
e1981f75 894 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
895 *
896 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
897 * as data is added to any of the @buffer's cpu buffers. Otherwise
898 * it will wait for data to be added to a specific cpu buffer.
899 */
13292494 900int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
15693458 901{
3f649ab7 902 struct ring_buffer_per_cpu *cpu_buffer;
903 DEFINE_WAIT(wait);
904 struct rb_irq_work *work;
e30f53aa 905 int ret = 0;
906
907 /*
908 * Depending on what the caller is waiting for, either any
909 * data in any cpu buffer, or a specific buffer, put the
910 * caller on the appropriate wait queue.
911 */
1e0d6714 912 if (cpu == RING_BUFFER_ALL_CPUS) {
15693458 913 work = &buffer->irq_work;
1e0d6714 914 /* Full only makes sense on per cpu reads */
2c2b0a78 915 full = 0;
1e0d6714 916 } else {
917 if (!cpumask_test_cpu(cpu, buffer->cpumask))
918 return -ENODEV;
919 cpu_buffer = buffer->buffers[cpu];
920 work = &cpu_buffer->irq_work;
921 }
922
923
e30f53aa 924 while (true) {
925 if (full)
926 prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
927 else
928 prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
929
930 /*
931 * The events can happen in critical sections where
932 * checking a work queue can cause deadlocks.
933 * After adding a task to the queue, this flag is set
934 * only to notify events to try to wake up the queue
935 * using irq_work.
936 *
937 * We don't clear it even if the buffer is no longer
938 * empty. The flag only causes the next event to run
939 * irq_work to do the work queue wake up. The worst
940 * that can happen if we race with !trace_empty() is that
941 * an event will cause an irq_work to try to wake up
942 * an empty queue.
943 *
944 * There's no reason to protect this flag either, as
945 * the work queue and irq_work logic will do the necessary
946 * synchronization for the wake ups. The only thing
947 * that is necessary is that the wake up happens after
948 * a task has been queued. It's OK for spurious wake ups.
949 */
950 if (full)
951 work->full_waiters_pending = true;
952 else
953 work->waiters_pending = true;
954
955 if (signal_pending(current)) {
956 ret = -EINTR;
957 break;
958 }
959
960 if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
961 break;
962
963 if (cpu != RING_BUFFER_ALL_CPUS &&
964 !ring_buffer_empty_cpu(buffer, cpu)) {
965 unsigned long flags;
966 bool pagebusy;
967 size_t nr_pages;
968 size_t dirty;
969
970 if (!full)
971 break;
972
973 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
974 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
975 nr_pages = cpu_buffer->nr_pages;
976 dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
977 if (!cpu_buffer->shortest_full ||
978 cpu_buffer->shortest_full < full)
979 cpu_buffer->shortest_full = full;
e30f53aa 980 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
981 if (!pagebusy &&
982 (!nr_pages || (dirty * 100) > full * nr_pages))
983 break;
984 }
15693458 985
15693458 986 schedule();
e30f53aa 987 }
15693458 988
989 if (full)
990 finish_wait(&work->full_waiters, &wait);
991 else
992 finish_wait(&work->waiters, &wait);
993
994 return ret;
995}
996
997/**
998 * ring_buffer_poll_wait - poll on buffer input
999 * @buffer: buffer to wait on
1000 * @cpu: the cpu buffer to wait on
1001 * @filp: the file descriptor
1002 * @poll_table: The poll descriptor
1003 *
1004 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
1005 * as data is added to any of the @buffer's cpu buffers. Otherwise
1006 * it will wait for data to be added to a specific cpu buffer.
1007 *
a9a08845 1008 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
1009 * zero otherwise.
1010 */
13292494 1011__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
1012 struct file *filp, poll_table *poll_table)
1013{
1014 struct ring_buffer_per_cpu *cpu_buffer;
1015 struct rb_irq_work *work;
1016
1017 if (cpu == RING_BUFFER_ALL_CPUS)
1018 work = &buffer->irq_work;
1019 else {
1020 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1021 return -EINVAL;
1022
1023 cpu_buffer = buffer->buffers[cpu];
1024 work = &cpu_buffer->irq_work;
1025 }
1026
15693458 1027 poll_wait(filp, &work->waiters, poll_table);
1028 work->waiters_pending = true;
1029 /*
1030 * There's a tight race between setting the waiters_pending and
1031 * checking if the ring buffer is empty. Once the waiters_pending bit
1032 * is set, the next event will wake the task up, but we can get stuck
1033 * if there's only a single event in.
1034 *
1035 * FIXME: Ideally, we need a memory barrier on the writer side as well,
1036 * but adding a memory barrier to all events will cause too much of a
1037 * performance hit in the fast path. We only need a memory barrier when
1038 * the buffer goes from empty to having content. But as this race is
1039 * extremely small, and it's not a problem if another event comes in, we
1040 * will fix it later.
1041 */
1042 smp_mb();
1043
1044 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
1045 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
a9a08845 1046 return EPOLLIN | EPOLLRDNORM;
1047 return 0;
1048}
1049
f536aafc 1050/* buffer may be either ring_buffer or ring_buffer_per_cpu */
1051#define RB_WARN_ON(b, cond) \
1052 ({ \
1053 int _____ret = unlikely(cond); \
1054 if (_____ret) { \
1055 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
1056 struct ring_buffer_per_cpu *__b = \
1057 (void *)b; \
1058 atomic_inc(&__b->buffer->record_disabled); \
1059 } else \
1060 atomic_inc(&b->record_disabled); \
1061 WARN_ON(1); \
1062 } \
1063 _____ret; \
3e89c7bb 1064 })
f536aafc 1065
1066/* Up this if you want to test the TIME_EXTENTS and normalization */
1067#define DEBUG_SHIFT 0
1068
13292494 1069static inline u64 rb_time_stamp(struct trace_buffer *buffer)
88eb0125 1070{
1071 u64 ts;
1072
1073 /* Skip retpolines :-( */
1074 if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
1075 ts = trace_clock_local();
1076 else
1077 ts = buffer->clock();
1078
88eb0125 1079 /* shift to debug/test normalization and TIME_EXTENTS */
bbeba3e5 1080 return ts << DEBUG_SHIFT;
1081}
1082
f3ef7202 1083u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
1084{
1085 u64 time;
1086
1087 preempt_disable_notrace();
6d3f1e12 1088 time = rb_time_stamp(buffer);
d6097c9e 1089 preempt_enable_notrace();
1090
1091 return time;
1092}
1093EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
1094
13292494 1095void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
1096 int cpu, u64 *ts)
1097{
1098 /* Just stupid testing the normalize function and deltas */
1099 *ts >>= DEBUG_SHIFT;
1100}
1101EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
1102
1103/*
1104 * Making the ring buffer lockless makes things tricky.
1105 * Although writes only happen on the CPU that they are on,
1106 * and they only need to worry about interrupts. Reads can
1107 * happen on any CPU.
1108 *
1109 * The reader page is always off the ring buffer, but when the
1110 * reader finishes with a page, it needs to swap its page with
1111 * a new one from the buffer. The reader needs to take from
1112 * the head (writes go to the tail). But if a writer is in overwrite
1113 * mode and wraps, it must push the head page forward.
1114 *
1115 * Here lies the problem.
1116 *
1117 * The reader must be careful to replace only the head page, and
1118 * not another one. As described at the top of the file in the
1119 * ASCII art, the reader sets its old page to point to the next
1120 * page after head. It then sets the page after head to point to
1121 * the old reader page. But if the writer moves the head page
1122 * during this operation, the reader could end up with the tail.
1123 *
1124 * We use cmpxchg to help prevent this race. We also do something
1125 * special with the page before head. We set the LSB to 1.
1126 *
1127 * When the writer must push the page forward, it will clear the
1128 * bit that points to the head page, move the head, and then set
1129 * the bit that points to the new head page.
1130 *
1131 * We also don't want an interrupt coming in and moving the head
1132 * page on another writer. Thus we use the second LSB to catch
1133 * that too. Thus:
1134 *
1135 * head->list->prev->next bit 1 bit 0
1136 * ------- -------
1137 * Normal page 0 0
1138 * Points to head page 0 1
1139 * New head page 1 0
1140 *
1141 * Note we can not trust the prev pointer of the head page, because:
1142 *
1143 * +----+ +-----+ +-----+
1144 * | |------>| T |---X--->| N |
1145 * | |<------| | | |
1146 * +----+ +-----+ +-----+
1147 * ^ ^ |
1148 * | +-----+ | |
1149 * +----------| R |----------+ |
1150 * | |<-----------+
1151 * +-----+
1152 *
1153 * Key: ---X--> HEAD flag set in pointer
1154 * T Tail page
1155 * R Reader page
1156 * N Next page
1157 *
1158 * (see __rb_reserve_next() to see where this happens)
1159 *
1160 * What the above shows is that the reader just swapped out
1161 * the reader page with a page in the buffer, but before it
1162 * could make the new header point back to the new page added
1163 * it was preempted by a writer. The writer moved forward onto
1164 * the new page added by the reader and is about to move forward
1165 * again.
1166 *
1167 * You can see, it is legitimate for the previous pointer of
1168 * the head (or any page) not to point back to itself. But only
6167c205 1169 * temporarily.
1170 */
1171
1172#define RB_PAGE_NORMAL 0UL
1173#define RB_PAGE_HEAD 1UL
1174#define RB_PAGE_UPDATE 2UL
1175
1176
1177#define RB_FLAG_MASK 3UL
1178
1179/* PAGE_MOVED is not part of the mask */
1180#define RB_PAGE_MOVED 4UL
1181
1182/*
1183 * rb_list_head - remove any bit
1184 */
1185static struct list_head *rb_list_head(struct list_head *list)
1186{
1187 unsigned long val = (unsigned long)list;
1188
1189 return (struct list_head *)(val & ~RB_FLAG_MASK);
1190}
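/*
 * Concrete example of the flag encoding (hypothetical address, for
 * illustration only): if &head->list is 0xffff888000010c40, then a
 * prev->next value of 0xffff888000010c41 marks "points to head page"
 * (RB_PAGE_HEAD) and 0xffff888000010c42 marks "update in progress"
 * (RB_PAGE_UPDATE); rb_list_head() masks off the low two bits and
 * returns the real 0xffff888000010c40 pointer in either case.
 */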
1191
1192/*
6d3f1e12 1193 * rb_is_head_page - test if the given page is the head page
1194 *
1195 * Because the reader may move the head_page pointer, we can
1196 * not trust what the head page is (it may be pointing to
1197 * the reader page). But if the next page is a header page,
1198 * its flags will be non zero.
1199 */
42b16b3f 1200static inline int
6689bed3 1201rb_is_head_page(struct buffer_page *page, struct list_head *list)
1202{
1203 unsigned long val;
1204
1205 val = (unsigned long)list->next;
1206
1207 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
1208 return RB_PAGE_MOVED;
1209
1210 return val & RB_FLAG_MASK;
1211}
1212
1213/*
1214 * rb_is_reader_page
1215 *
1216 * The unique thing about the reader page, is that, if the
1217 * writer is ever on it, the previous pointer never points
1218 * back to the reader page.
1219 */
06ca3209 1220static bool rb_is_reader_page(struct buffer_page *page)
1221{
1222 struct list_head *list = page->list.prev;
1223
1224 return rb_list_head(list->next) != &page->list;
1225}
1226
1227/*
1228 * rb_set_list_to_head - set a list_head to be pointing to head.
1229 */
6689bed3 1230static void rb_set_list_to_head(struct list_head *list)
1231{
1232 unsigned long *ptr;
1233
1234 ptr = (unsigned long *)&list->next;
1235 *ptr |= RB_PAGE_HEAD;
1236 *ptr &= ~RB_PAGE_UPDATE;
1237}
1238
1239/*
1240 * rb_head_page_activate - sets up head page
1241 */
1242static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
1243{
1244 struct buffer_page *head;
1245
1246 head = cpu_buffer->head_page;
1247 if (!head)
1248 return;
1249
1250 /*
1251 * Set the previous list pointer to have the HEAD flag.
1252 */
6689bed3 1253 rb_set_list_to_head(head->list.prev);
1254}
1255
1256static void rb_list_head_clear(struct list_head *list)
1257{
1258 unsigned long *ptr = (unsigned long *)&list->next;
1259
1260 *ptr &= ~RB_FLAG_MASK;
1261}
1262
1263/*
6167c205 1264 * rb_head_page_deactivate - clears head page ptr (for free list)
1265 */
1266static void
1267rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
1268{
1269 struct list_head *hd;
1270
1271 /* Go through the whole list and clear any pointers found. */
1272 rb_list_head_clear(cpu_buffer->pages);
1273
1274 list_for_each(hd, cpu_buffer->pages)
1275 rb_list_head_clear(hd);
1276}
1277
1278static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
1279 struct buffer_page *head,
1280 struct buffer_page *prev,
1281 int old_flag, int new_flag)
1282{
1283 struct list_head *list;
1284 unsigned long val = (unsigned long)&head->list;
1285 unsigned long ret;
1286
1287 list = &prev->list;
1288
1289 val &= ~RB_FLAG_MASK;
1290
1291 ret = cmpxchg((unsigned long *)&list->next,
1292 val | old_flag, val | new_flag);
1293
1294 /* check if the reader took the page */
1295 if ((ret & ~RB_FLAG_MASK) != val)
1296 return RB_PAGE_MOVED;
1297
1298 return ret & RB_FLAG_MASK;
1299}
1300
1301static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
1302 struct buffer_page *head,
1303 struct buffer_page *prev,
1304 int old_flag)
1305{
1306 return rb_head_page_set(cpu_buffer, head, prev,
1307 old_flag, RB_PAGE_UPDATE);
1308}
1309
1310static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
1311 struct buffer_page *head,
1312 struct buffer_page *prev,
1313 int old_flag)
1314{
1315 return rb_head_page_set(cpu_buffer, head, prev,
1316 old_flag, RB_PAGE_HEAD);
1317}
1318
1319static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
1320 struct buffer_page *head,
1321 struct buffer_page *prev,
1322 int old_flag)
1323{
1324 return rb_head_page_set(cpu_buffer, head, prev,
1325 old_flag, RB_PAGE_NORMAL);
1326}
1327
6689bed3 1328static inline void rb_inc_page(struct buffer_page **bpage)
1329{
1330 struct list_head *p = rb_list_head((*bpage)->list.next);
1331
1332 *bpage = list_entry(p, struct buffer_page, list);
1333}
1334
1335static struct buffer_page *
1336rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
1337{
1338 struct buffer_page *head;
1339 struct buffer_page *page;
1340 struct list_head *list;
1341 int i;
1342
1343 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
1344 return NULL;
1345
1346 /* sanity check */
1347 list = cpu_buffer->pages;
1348 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
1349 return NULL;
1350
1351 page = head = cpu_buffer->head_page;
1352 /*
1353 * It is possible that the writer moves the header behind
1354 * where we started, and we miss in one loop.
1355 * A second loop should grab the header, but we'll do
1356 * three loops just because I'm paranoid.
1357 */
1358 for (i = 0; i < 3; i++) {
1359 do {
6689bed3 1360 if (rb_is_head_page(page, page->list.prev)) {
1361 cpu_buffer->head_page = page;
1362 return page;
1363 }
6689bed3 1364 rb_inc_page(&page);
1365 } while (page != head);
1366 }
1367
1368 RB_WARN_ON(cpu_buffer, 1);
1369
1370 return NULL;
1371}
1372
1373static int rb_head_page_replace(struct buffer_page *old,
1374 struct buffer_page *new)
1375{
1376 unsigned long *ptr = (unsigned long *)&old->list.prev->next;
1377 unsigned long val;
1378 unsigned long ret;
1379
1380 val = *ptr & ~RB_FLAG_MASK;
1381 val |= RB_PAGE_HEAD;
1382
08a40816 1383 ret = cmpxchg(ptr, val, (unsigned long)&new->list);
1384
1385 return ret == val;
1386}
1387
1388/*
1389 * rb_tail_page_update - move the tail page forward
77ae365e 1390 */
70004986 1391static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
1392 struct buffer_page *tail_page,
1393 struct buffer_page *next_page)
1394{
1395 unsigned long old_entries;
1396 unsigned long old_write;
1397
1398 /*
1399 * The tail page now needs to be moved forward.
1400 *
1401 * We need to reset the tail page, but without messing
1402 * with possible erasing of data brought in by interrupts
1403 * that have moved the tail page and are currently on it.
1404 *
1405 * We add a counter to the write field to denote this.
1406 */
1407 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
1408 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
1409
2c2b0a78 1410 local_inc(&cpu_buffer->pages_touched);
1411 /*
1412 * Just make sure we have seen our old_write and synchronize
1413 * with any interrupts that come in.
1414 */
1415 barrier();
1416
1417 /*
1418 * If the tail page is still the same as what we think
1419 * it is, then it is up to us to update the tail
1420 * pointer.
1421 */
8573636e 1422 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
1423 /* Zero the write counter */
1424 unsigned long val = old_write & ~RB_WRITE_MASK;
1425 unsigned long eval = old_entries & ~RB_WRITE_MASK;
1426
1427 /*
1428 * This will only succeed if an interrupt did
1429 * not come in and change it. In which case, we
1430 * do not want to modify it.
1431 *
1432 * We add (void) to let the compiler know that we do not care
1433 * about the return value of these functions. We use the
1434 * cmpxchg to only update if an interrupt did not already
1435 * do it for us. If the cmpxchg fails, we don't care.
77ae365e 1436 */
1437 (void)local_cmpxchg(&next_page->write, old_write, val);
1438 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
1439
1440 /*
1441 * No need to worry about races with clearing out the commit.
1442 * it only can increment when a commit takes place. But that
1443 * only happens in the outer most nested commit.
1444 */
1445 local_set(&next_page->page->commit, 0);
1446
1447 /* Again, either we update tail_page or an interrupt does */
1448 (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
77ae365e 1449 }
1450}
1451
1452static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1453 struct buffer_page *bpage)
1454{
1455 unsigned long val = (unsigned long)bpage;
1456
1457 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
1458 return 1;
1459
1460 return 0;
1461}
1462
1463/**
1464 * rb_check_list - make sure a pointer to a list has the last bits zero
1465 */
1466static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
1467 struct list_head *list)
1468{
1469 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
1470 return 1;
1471 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
1472 return 1;
1473 return 0;
1474}
1475
7a8e76a3 1476/**
d611851b 1477 * rb_check_pages - integrity check of buffer pages
1478 * @cpu_buffer: CPU buffer with pages to test
1479 *
c3706f00 1480 * As a safety measure we check to make sure the data pages have not
1481 * been corrupted.
1482 */
1483static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1484{
3adc54fa 1485 struct list_head *head = cpu_buffer->pages;
044fa782 1486 struct buffer_page *bpage, *tmp;
7a8e76a3 1487
1488 /* Reset the head page if it exists */
1489 if (cpu_buffer->head_page)
1490 rb_set_head_page(cpu_buffer);
1491
1492 rb_head_page_deactivate(cpu_buffer);
1493
1494 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
1495 return -1;
1496 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
1497 return -1;
7a8e76a3 1498
1499 if (rb_check_list(cpu_buffer, head))
1500 return -1;
1501
044fa782 1502 list_for_each_entry_safe(bpage, tmp, head, list) {
3e89c7bb 1503 if (RB_WARN_ON(cpu_buffer,
044fa782 1504 bpage->list.next->prev != &bpage->list))
1505 return -1;
1506 if (RB_WARN_ON(cpu_buffer,
044fa782 1507 bpage->list.prev->next != &bpage->list))
3e89c7bb 1508 return -1;
1509 if (rb_check_list(cpu_buffer, &bpage->list))
1510 return -1;
1511 }
1512
1513 rb_head_page_activate(cpu_buffer);
1514
1515 return 0;
1516}
1517
1518static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1519 long nr_pages, struct list_head *pages)
7a8e76a3 1520{
044fa782 1521 struct buffer_page *bpage, *tmp;
1522 bool user_thread = current->mm != NULL;
1523 gfp_t mflags;
9b94a8fb 1524 long i;
3adc54fa 1525
1526 /*
1527 * Check if the available memory is there first.
1528 * Note, si_mem_available() only gives us a rough estimate of available
1529 * memory. It may not be accurate. But we don't care, we just want
1530 * to prevent doing any allocation when it is obvious that it is
1531 * not going to succeed.
1532 */
1533 i = si_mem_available();
1534 if (i < nr_pages)
1535 return -ENOMEM;
1536
1537 /*
1538 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
1539 * gracefully without invoking oom-killer and the system is not
1540 * destabilized.
1541 */
1542 mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
1543
1544 /*
1545 * If a user thread allocates too much and si_mem_available()
1546 * reports there's enough memory even though there is not,
1547 * make sure the OOM killer kills this thread. This can happen
1548 * even with RETRY_MAYFAIL because another task may be doing
1549 * an allocation after this task has taken all memory.
1550 * This is the task the OOM killer needs to take out during this
1551 * loop, even if it was triggered by an allocation somewhere else.
1552 */
1553 if (user_thread)
1554 set_current_oom_origin();
7a8e76a3 1555 for (i = 0; i < nr_pages; i++) {
7ea59064 1556 struct page *page;
927e56db 1557
044fa782 1558 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
74e2afc6 1559 mflags, cpu_to_node(cpu_buffer->cpu));
044fa782 1560 if (!bpage)
e4c2ce82 1561 goto free_pages;
77ae365e 1562
1563 rb_check_bpage(cpu_buffer, bpage);
1564
438ced17 1565 list_add(&bpage->list, pages);
77ae365e 1566
74e2afc6 1567 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0);
7ea59064 1568 if (!page)
7a8e76a3 1569 goto free_pages;
7ea59064 1570 bpage->page = page_address(page);
044fa782 1571 rb_init_page(bpage->page);
1572
1573 if (user_thread && fatal_signal_pending(current))
1574 goto free_pages;
7a8e76a3 1575 }
1576 if (user_thread)
1577 clear_current_oom_origin();
7a8e76a3 1578
1579 return 0;
1580
1581free_pages:
1582 list_for_each_entry_safe(bpage, tmp, pages, list) {
1583 list_del_init(&bpage->list);
1584 free_buffer_page(bpage);
1585 }
1586 if (user_thread)
1587 clear_current_oom_origin();
1588
1589 return -ENOMEM;
1590}
1591
1592static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
9b94a8fb 1593 unsigned long nr_pages)
1594{
1595 LIST_HEAD(pages);
1596
1597 WARN_ON(!nr_pages);
1598
74e2afc6 1599 if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
1600 return -ENOMEM;
1601
1602 /*
1603 * The ring buffer page list is a circular list that does not
1604 * start and end with a list head. All page list items point to
1605 * other pages.
1606 */
1607 cpu_buffer->pages = pages.next;
1608 list_del(&pages);
7a8e76a3 1609
1610 cpu_buffer->nr_pages = nr_pages;
1611
1612 rb_check_pages(cpu_buffer);
1613
1614 return 0;
1615}
1616
1617static struct ring_buffer_per_cpu *
13292494 1618rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
1619{
1620 struct ring_buffer_per_cpu *cpu_buffer;
044fa782 1621 struct buffer_page *bpage;
7ea59064 1622 struct page *page;
1623 int ret;
1624
1625 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1626 GFP_KERNEL, cpu_to_node(cpu));
1627 if (!cpu_buffer)
1628 return NULL;
1629
1630 cpu_buffer->cpu = cpu;
1631 cpu_buffer->buffer = buffer;
5389f6fa 1632 raw_spin_lock_init(&cpu_buffer->reader_lock);
1f8a6a10 1633 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
edc35bd7 1634 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
83f40318 1635 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
05fdd70d 1636 init_completion(&cpu_buffer->update_done);
15693458 1637 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
f1dc6725 1638 init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1e0d6714 1639 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
7a8e76a3 1640
044fa782 1641 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
e4c2ce82 1642 GFP_KERNEL, cpu_to_node(cpu));
044fa782 1643 if (!bpage)
1644 goto fail_free_buffer;
1645
1646 rb_check_bpage(cpu_buffer, bpage);
1647
044fa782 1648 cpu_buffer->reader_page = bpage;
1649 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1650 if (!page)
e4c2ce82 1651 goto fail_free_reader;
7ea59064 1652 bpage->page = page_address(page);
044fa782 1653 rb_init_page(bpage->page);
e4c2ce82 1654
d769041f 1655 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
44b99462 1656 INIT_LIST_HEAD(&cpu_buffer->new_pages);
d769041f 1657
438ced17 1658 ret = rb_allocate_pages(cpu_buffer, nr_pages);
7a8e76a3 1659 if (ret < 0)
d769041f 1660 goto fail_free_reader;
1661
1662 cpu_buffer->head_page
3adc54fa 1663 = list_entry(cpu_buffer->pages, struct buffer_page, list);
bf41a158 1664 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
7a8e76a3 1665
1666 rb_head_page_activate(cpu_buffer);
1667
1668 return cpu_buffer;
1669
1670 fail_free_reader:
1671 free_buffer_page(cpu_buffer->reader_page);
1672
1673 fail_free_buffer:
1674 kfree(cpu_buffer);
1675 return NULL;
1676}
1677
1678static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1679{
3adc54fa 1680 struct list_head *head = cpu_buffer->pages;
044fa782 1681 struct buffer_page *bpage, *tmp;
7a8e76a3 1682
1683 free_buffer_page(cpu_buffer->reader_page);
1684
1685 rb_head_page_deactivate(cpu_buffer);
1686
1687 if (head) {
1688 list_for_each_entry_safe(bpage, tmp, head, list) {
1689 list_del_init(&bpage->list);
1690 free_buffer_page(bpage);
1691 }
1692 bpage = list_entry(head, struct buffer_page, list);
044fa782 1693 free_buffer_page(bpage);
7a8e76a3 1694 }
3adc54fa 1695
1696 kfree(cpu_buffer);
1697}
1698
1699/**
d611851b 1700 * __ring_buffer_alloc - allocate a new ring_buffer
68814b58 1701 * @size: the size in bytes per cpu that is needed.
7a8e76a3 1702 * @flags: attributes to set for the ring buffer.
59e7cffe 1703 * @key: ring buffer reader_lock_key.
1704 *
1705 * Currently the only flag that is available is the RB_FL_OVERWRITE
1706 * flag. This flag means that the buffer will overwrite old data
1707 * when the buffer wraps. If this flag is not set, the buffer will
1708 * drop data when the tail hits the head.
1709 */
13292494 1710struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1f8a6a10 1711 struct lock_class_key *key)
7a8e76a3 1712{
13292494 1713 struct trace_buffer *buffer;
9b94a8fb 1714 long nr_pages;
7a8e76a3 1715 int bsize;
9b94a8fb 1716 int cpu;
b32614c0 1717 int ret;
1718
1719 /* keep it in its own cache line */
1720 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1721 GFP_KERNEL);
1722 if (!buffer)
1723 return NULL;
1724
b18cc3de 1725 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1726 goto fail_free_buffer;
1727
438ced17 1728 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
7a8e76a3 1729 buffer->flags = flags;
37886f6a 1730 buffer->clock = trace_clock_local;
1f8a6a10 1731 buffer->reader_lock_key = key;
7a8e76a3 1732
15693458 1733 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
f1dc6725 1734 init_waitqueue_head(&buffer->irq_work.waiters);
15693458 1735
7a8e76a3 1736 /* need at least two pages */
1737 if (nr_pages < 2)
1738 nr_pages = 2;
7a8e76a3 1739
1740 buffer->cpus = nr_cpu_ids;
1741
1742 bsize = sizeof(void *) * nr_cpu_ids;
1743 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1744 GFP_KERNEL);
1745 if (!buffer->buffers)
9e01c1b7 1746 goto fail_free_cpumask;
7a8e76a3 1747
b32614c0
SAS
1748 cpu = raw_smp_processor_id();
1749 cpumask_set_cpu(cpu, buffer->cpumask);
1750 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1751 if (!buffer->buffers[cpu])
1752 goto fail_free_buffers;
7a8e76a3 1753
b32614c0
SAS
1754 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1755 if (ret < 0)
1756 goto fail_free_buffers;
554f786e 1757
7a8e76a3
SR
1758 mutex_init(&buffer->mutex);
1759
1760 return buffer;
1761
1762 fail_free_buffers:
1763 for_each_buffer_cpu(buffer, cpu) {
1764 if (buffer->buffers[cpu])
1765 rb_free_cpu_buffer(buffer->buffers[cpu]);
1766 }
1767 kfree(buffer->buffers);
1768
9e01c1b7
RR
1769 fail_free_cpumask:
1770 free_cpumask_var(buffer->cpumask);
1771
7a8e76a3
SR
1772 fail_free_buffer:
1773 kfree(buffer);
1774 return NULL;
1775}
1f8a6a10 1776EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
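/*
 * Illustrative sketch, not part of ring_buffer.c: callers normally use the
 * ring_buffer_alloc() wrapper from <linux/ring_buffer.h>, which supplies the
 * lock_class_key that __ring_buffer_alloc() takes. The size below is an
 * assumption for the example only; it is rounded up to whole buffer pages
 * (at least two) per CPU.
 */
static struct trace_buffer *example_overwrite_buffer(void)
{
	/* request roughly 1MB per CPU, overwriting old data when the buffer wraps */
	struct trace_buffer *buf = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);

	if (!buf)
		return NULL;

	/* ... use the buffer, then pair with ring_buffer_free(buf) ... */
	return buf;
}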
7a8e76a3
SR
1777
1778/**
1779 * ring_buffer_free - free a ring buffer.
1780 * @buffer: the buffer to free.
1781 */
1782void
13292494 1783ring_buffer_free(struct trace_buffer *buffer)
7a8e76a3
SR
1784{
1785 int cpu;
1786
b32614c0 1787 cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
554f786e 1788
7a8e76a3
SR
1789 for_each_buffer_cpu(buffer, cpu)
1790 rb_free_cpu_buffer(buffer->buffers[cpu]);
1791
bd3f0221 1792 kfree(buffer->buffers);
9e01c1b7
RR
1793 free_cpumask_var(buffer->cpumask);
1794
7a8e76a3
SR
1795 kfree(buffer);
1796}
c4f50183 1797EXPORT_SYMBOL_GPL(ring_buffer_free);
7a8e76a3 1798
13292494 1799void ring_buffer_set_clock(struct trace_buffer *buffer,
37886f6a
SR
1800 u64 (*clock)(void))
1801{
1802 buffer->clock = clock;
1803}
1804
13292494 1805void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
00b41452
TZ
1806{
1807 buffer->time_stamp_abs = abs;
1808}
1809
13292494 1810bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
00b41452
TZ
1811{
1812 return buffer->time_stamp_abs;
1813}
1814
7a8e76a3
SR
1815static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1816
83f40318
VN
1817static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1818{
1819 return local_read(&bpage->entries) & RB_WRITE_MASK;
1820}
1821
1822static inline unsigned long rb_page_write(struct buffer_page *bpage)
1823{
1824 return local_read(&bpage->write) & RB_WRITE_MASK;
1825}
1826
5040b4b7 1827static int
9b94a8fb 1828rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
7a8e76a3 1829{
83f40318
VN
1830 struct list_head *tail_page, *to_remove, *next_page;
1831 struct buffer_page *to_remove_page, *tmp_iter_page;
1832 struct buffer_page *last_page, *first_page;
9b94a8fb 1833 unsigned long nr_removed;
83f40318
VN
1834 unsigned long head_bit;
1835 int page_entries;
1836
1837 head_bit = 0;
7a8e76a3 1838
5389f6fa 1839 raw_spin_lock_irq(&cpu_buffer->reader_lock);
83f40318
VN
1840 atomic_inc(&cpu_buffer->record_disabled);
1841 /*
1842 * We don't race with the readers since we have acquired the reader
1843 * lock. We also don't race with writers after disabling recording.
1844 * This makes it easy to figure out the first and the last page to be
1845 * removed from the list. We unlink all the pages in between including
1846 * the first and last pages. This is done in a busy loop so that we
1847 * lose the least number of traces.
1848 * The pages are freed after we restart recording and unlock readers.
1849 */
1850 tail_page = &cpu_buffer->tail_page->list;
77ae365e 1851
83f40318
VN
1852 /*
1853 * tail page might be on reader page, we remove the next page
1854 * from the ring buffer
1855 */
1856 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1857 tail_page = rb_list_head(tail_page->next);
1858 to_remove = tail_page;
1859
1860 /* start of pages to remove */
1861 first_page = list_entry(rb_list_head(to_remove->next),
1862 struct buffer_page, list);
1863
1864 for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1865 to_remove = rb_list_head(to_remove)->next;
1866 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
7a8e76a3 1867 }
7a8e76a3 1868
83f40318 1869 next_page = rb_list_head(to_remove)->next;
7a8e76a3 1870
83f40318
VN
1871 /*
1872 * Now we remove all pages between tail_page and next_page.
1873 * Make sure that we have head_bit value preserved for the
1874 * next page
1875 */
1876 tail_page->next = (struct list_head *)((unsigned long)next_page |
1877 head_bit);
1878 next_page = rb_list_head(next_page);
1879 next_page->prev = tail_page;
1880
1881 /* make sure pages points to a valid page in the ring buffer */
1882 cpu_buffer->pages = next_page;
1883
1884 /* update head page */
1885 if (head_bit)
1886 cpu_buffer->head_page = list_entry(next_page,
1887 struct buffer_page, list);
1888
1889 /*
1890 * change read pointer to make sure any read iterators reset
1891 * themselves
1892 */
1893 cpu_buffer->read = 0;
1894
1895 /* pages are removed, resume tracing and then free the pages */
1896 atomic_dec(&cpu_buffer->record_disabled);
5389f6fa 1897 raw_spin_unlock_irq(&cpu_buffer->reader_lock);
83f40318
VN
1898
1899 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1900
1901 /* last buffer page to remove */
1902 last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1903 list);
1904 tmp_iter_page = first_page;
1905
1906 do {
83f36555
VN
1907 cond_resched();
1908
83f40318 1909 to_remove_page = tmp_iter_page;
6689bed3 1910 rb_inc_page(&tmp_iter_page);
83f40318
VN
1911
1912 /* update the counters */
1913 page_entries = rb_page_entries(to_remove_page);
1914 if (page_entries) {
1915 /*
1916 * If something was added to this page, it was full
1917 * since it is not the tail page. So we deduct the
1918 * bytes consumed in ring buffer from here.
48fdc72f 1919 * Increment overrun to account for the lost events.
83f40318 1920 */
48fdc72f 1921 local_add(page_entries, &cpu_buffer->overrun);
83f40318
VN
1922 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1923 }
1924
1925 /*
1926 * We have already removed references to this list item, just
1927 * free up the buffer_page and its page
1928 */
1929 free_buffer_page(to_remove_page);
1930 nr_removed--;
1931
1932 } while (to_remove_page != last_page);
1933
1934 RB_WARN_ON(cpu_buffer, nr_removed);
5040b4b7
VN
1935
1936 return nr_removed == 0;
7a8e76a3
SR
1937}
1938
5040b4b7
VN
1939static int
1940rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 1941{
5040b4b7
VN
1942 struct list_head *pages = &cpu_buffer->new_pages;
1943 int retries, success;
7a8e76a3 1944
5389f6fa 1945 raw_spin_lock_irq(&cpu_buffer->reader_lock);
5040b4b7
VN
1946 /*
1947 * We are holding the reader lock, so the reader page won't be swapped
1948 * in the ring buffer. Now we are racing with the writer trying to
1949 * move head page and the tail page.
1950 * We are going to adapt the reader page update process where:
1951 * 1. We first splice the start and end of list of new pages between
1952 * the head page and its previous page.
1953 * 2. We cmpxchg the prev_page->next to point from head page to the
1954 * start of new pages list.
1955 * 3. Finally, we update the head->prev to the end of new list.
1956 *
1957 * We will try this process 10 times, to make sure that we don't keep
1958 * spinning.
1959 */
1960 retries = 10;
1961 success = 0;
1962 while (retries--) {
1963 struct list_head *head_page, *prev_page, *r;
1964 struct list_head *last_page, *first_page;
1965 struct list_head *head_page_with_bit;
77ae365e 1966
5040b4b7 1967 head_page = &rb_set_head_page(cpu_buffer)->list;
54f7be5b
SR
1968 if (!head_page)
1969 break;
5040b4b7
VN
1970 prev_page = head_page->prev;
1971
1972 first_page = pages->next;
1973 last_page = pages->prev;
1974
1975 head_page_with_bit = (struct list_head *)
1976 ((unsigned long)head_page | RB_PAGE_HEAD);
1977
1978 last_page->next = head_page_with_bit;
1979 first_page->prev = prev_page;
1980
1981 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1982
1983 if (r == head_page_with_bit) {
1984 /*
1985 * yay, we replaced the page pointer to our new list,
 1986 * now, we just have to update the head page's prev
1987 * pointer to point to end of list
1988 */
1989 head_page->prev = last_page;
1990 success = 1;
1991 break;
1992 }
7a8e76a3 1993 }
7a8e76a3 1994
5040b4b7
VN
1995 if (success)
1996 INIT_LIST_HEAD(pages);
1997 /*
1998 * If we weren't successful in adding in new pages, warn and stop
1999 * tracing
2000 */
2001 RB_WARN_ON(cpu_buffer, !success);
5389f6fa 2002 raw_spin_unlock_irq(&cpu_buffer->reader_lock);
5040b4b7
VN
2003
2004 /* free pages if they weren't inserted */
2005 if (!success) {
2006 struct buffer_page *bpage, *tmp;
2007 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2008 list) {
2009 list_del_init(&bpage->list);
2010 free_buffer_page(bpage);
2011 }
2012 }
2013 return success;
7a8e76a3
SR
2014}
2015
83f40318 2016static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
438ced17 2017{
5040b4b7
VN
2018 int success;
2019
438ced17 2020 if (cpu_buffer->nr_pages_to_update > 0)
5040b4b7 2021 success = rb_insert_pages(cpu_buffer);
438ced17 2022 else
5040b4b7
VN
2023 success = rb_remove_pages(cpu_buffer,
2024 -cpu_buffer->nr_pages_to_update);
83f40318 2025
5040b4b7
VN
2026 if (success)
2027 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
83f40318
VN
2028}
2029
2030static void update_pages_handler(struct work_struct *work)
2031{
2032 struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
2033 struct ring_buffer_per_cpu, update_pages_work);
2034 rb_update_pages(cpu_buffer);
05fdd70d 2035 complete(&cpu_buffer->update_done);
438ced17
VN
2036}
2037
7a8e76a3
SR
2038/**
2039 * ring_buffer_resize - resize the ring buffer
2040 * @buffer: the buffer to resize.
2041 * @size: the new size.
d611851b 2042 * @cpu_id: the cpu buffer to resize
7a8e76a3 2043 *
7a8e76a3
SR
2044 * Minimum size is 2 * BUF_PAGE_SIZE.
2045 *
83f40318 2046 * Returns 0 on success and < 0 on failure.
7a8e76a3 2047 */
13292494 2048int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
438ced17 2049 int cpu_id)
7a8e76a3
SR
2050{
2051 struct ring_buffer_per_cpu *cpu_buffer;
9b94a8fb 2052 unsigned long nr_pages;
0a1754b2 2053 int cpu, err;
7a8e76a3 2054
ee51a1de
IM
2055 /*
2056 * Always succeed at resizing a non-existent buffer:
2057 */
2058 if (!buffer)
0a1754b2 2059 return 0;
ee51a1de 2060
6a31e1f1
SR
2061 /* Make sure the requested buffer exists */
2062 if (cpu_id != RING_BUFFER_ALL_CPUS &&
2063 !cpumask_test_cpu(cpu_id, buffer->cpumask))
0a1754b2 2064 return 0;
6a31e1f1 2065
59643d15 2066 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
7a8e76a3
SR
2067
2068 /* we need a minimum of two pages */
59643d15
SRRH
2069 if (nr_pages < 2)
2070 nr_pages = 2;
7a8e76a3 2071
83f40318 2072 /* prevent another thread from changing buffer sizes */
7a8e76a3 2073 mutex_lock(&buffer->mutex);
7a8e76a3 2074
07b8b10e 2075
438ced17 2076 if (cpu_id == RING_BUFFER_ALL_CPUS) {
07b8b10e
SRV
2077 /*
2078 * Don't succeed if resizing is disabled, as a reader might be
2079 * manipulating the ring buffer and is expecting a sane state while
2080 * this is true.
2081 */
2082 for_each_buffer_cpu(buffer, cpu) {
2083 cpu_buffer = buffer->buffers[cpu];
2084 if (atomic_read(&cpu_buffer->resize_disabled)) {
2085 err = -EBUSY;
2086 goto out_err_unlock;
2087 }
2088 }
2089
438ced17 2090 /* calculate the pages to update */
7a8e76a3
SR
2091 for_each_buffer_cpu(buffer, cpu) {
2092 cpu_buffer = buffer->buffers[cpu];
7a8e76a3 2093
438ced17
VN
2094 cpu_buffer->nr_pages_to_update = nr_pages -
2095 cpu_buffer->nr_pages;
438ced17
VN
2096 /*
2097 * nothing more to do for removing pages or no update
2098 */
2099 if (cpu_buffer->nr_pages_to_update <= 0)
2100 continue;
d7ec4bfe 2101 /*
438ced17
VN
2102 * to add pages, make sure all new pages can be
2103 * allocated without receiving ENOMEM
d7ec4bfe 2104 */
438ced17 2105 INIT_LIST_HEAD(&cpu_buffer->new_pages);
74e2afc6
QH
2106 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2107 &cpu_buffer->new_pages)) {
438ced17 2108 /* not enough memory for new pages */
83f40318
VN
2109 err = -ENOMEM;
2110 goto out_err;
2111 }
2112 }
2113
99c37d1a 2114 cpus_read_lock();
83f40318
VN
2115 /*
2116 * Fire off all the required work handlers
05fdd70d 2117 * We can't schedule on offline CPUs, but it's not necessary
83f40318
VN
2118 * since we can change their buffer sizes without any race.
2119 */
2120 for_each_buffer_cpu(buffer, cpu) {
2121 cpu_buffer = buffer->buffers[cpu];
05fdd70d 2122 if (!cpu_buffer->nr_pages_to_update)
83f40318
VN
2123 continue;
2124
021c5b34
CM
2125 /* Can't run something on an offline CPU. */
2126 if (!cpu_online(cpu)) {
f5eb5588
SRRH
2127 rb_update_pages(cpu_buffer);
2128 cpu_buffer->nr_pages_to_update = 0;
2129 } else {
05fdd70d
VN
2130 schedule_work_on(cpu,
2131 &cpu_buffer->update_pages_work);
f5eb5588 2132 }
7a8e76a3 2133 }
7a8e76a3 2134
438ced17
VN
2135 /* wait for all the updates to complete */
2136 for_each_buffer_cpu(buffer, cpu) {
2137 cpu_buffer = buffer->buffers[cpu];
05fdd70d 2138 if (!cpu_buffer->nr_pages_to_update)
83f40318
VN
2139 continue;
2140
05fdd70d
VN
2141 if (cpu_online(cpu))
2142 wait_for_completion(&cpu_buffer->update_done);
83f40318 2143 cpu_buffer->nr_pages_to_update = 0;
438ced17 2144 }
83f40318 2145
99c37d1a 2146 cpus_read_unlock();
438ced17
VN
2147 } else {
2148 cpu_buffer = buffer->buffers[cpu_id];
83f40318 2149
438ced17
VN
2150 if (nr_pages == cpu_buffer->nr_pages)
2151 goto out;
7a8e76a3 2152
07b8b10e
SRV
2153 /*
2154 * Don't succeed if resizing is disabled, as a reader might be
2155 * manipulating the ring buffer and is expecting a sane state while
2156 * this is true.
2157 */
2158 if (atomic_read(&cpu_buffer->resize_disabled)) {
2159 err = -EBUSY;
2160 goto out_err_unlock;
2161 }
2162
438ced17
VN
2163 cpu_buffer->nr_pages_to_update = nr_pages -
2164 cpu_buffer->nr_pages;
2165
2166 INIT_LIST_HEAD(&cpu_buffer->new_pages);
2167 if (cpu_buffer->nr_pages_to_update > 0 &&
74e2afc6
QH
2168 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2169 &cpu_buffer->new_pages)) {
83f40318
VN
2170 err = -ENOMEM;
2171 goto out_err;
2172 }
438ced17 2173
99c37d1a 2174 cpus_read_lock();
83f40318 2175
021c5b34
CM
2176 /* Can't run something on an offline CPU. */
2177 if (!cpu_online(cpu_id))
f5eb5588
SRRH
2178 rb_update_pages(cpu_buffer);
2179 else {
83f40318
VN
2180 schedule_work_on(cpu_id,
2181 &cpu_buffer->update_pages_work);
05fdd70d 2182 wait_for_completion(&cpu_buffer->update_done);
f5eb5588 2183 }
83f40318 2184
83f40318 2185 cpu_buffer->nr_pages_to_update = 0;
99c37d1a 2186 cpus_read_unlock();
438ced17 2187 }
7a8e76a3
SR
2188
2189 out:
659f451f
SR
2190 /*
2191 * The ring buffer resize can happen with the ring buffer
2192 * enabled, so that the update disturbs the tracing as little
2193 * as possible. But if the buffer is disabled, we do not need
2194 * to worry about that, and we can take the time to verify
2195 * that the buffer is not corrupt.
2196 */
2197 if (atomic_read(&buffer->record_disabled)) {
2198 atomic_inc(&buffer->record_disabled);
2199 /*
2200 * Even though the buffer was disabled, we must make sure
2201 * that it is truly disabled before calling rb_check_pages.
2202 * There could have been a race between checking
2203 * record_disable and incrementing it.
2204 */
74401729 2205 synchronize_rcu();
659f451f
SR
2206 for_each_buffer_cpu(buffer, cpu) {
2207 cpu_buffer = buffer->buffers[cpu];
2208 rb_check_pages(cpu_buffer);
2209 }
2210 atomic_dec(&buffer->record_disabled);
2211 }
2212
7a8e76a3 2213 mutex_unlock(&buffer->mutex);
0a1754b2 2214 return 0;
7a8e76a3 2215
83f40318 2216 out_err:
438ced17
VN
2217 for_each_buffer_cpu(buffer, cpu) {
2218 struct buffer_page *bpage, *tmp;
83f40318 2219
438ced17 2220 cpu_buffer = buffer->buffers[cpu];
438ced17 2221 cpu_buffer->nr_pages_to_update = 0;
83f40318 2222
438ced17
VN
2223 if (list_empty(&cpu_buffer->new_pages))
2224 continue;
83f40318 2225
438ced17
VN
2226 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2227 list) {
2228 list_del_init(&bpage->list);
2229 free_buffer_page(bpage);
2230 }
7a8e76a3 2231 }
07b8b10e 2232 out_err_unlock:
641d2f63 2233 mutex_unlock(&buffer->mutex);
83f40318 2234 return err;
7a8e76a3 2235}
c4f50183 2236EXPORT_SYMBOL_GPL(ring_buffer_resize);
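/*
 * Illustrative sketch, not part of ring_buffer.c: resizing every per-CPU
 * buffer with a single call. RING_BUFFER_ALL_CPUS comes from
 * <linux/ring_buffer.h>; the new size below is an assumption for the
 * example only and, like the initial size, is rounded up to whole pages.
 */
static int example_grow_buffer(struct trace_buffer *buffer)
{
	/* grow (or shrink) all per-CPU buffers to roughly 2MB each */
	return ring_buffer_resize(buffer, 2 << 20, RING_BUFFER_ALL_CPUS);
}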
7a8e76a3 2237
13292494 2238void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
750912fa
DS
2239{
2240 mutex_lock(&buffer->mutex);
2241 if (val)
2242 buffer->flags |= RB_FL_OVERWRITE;
2243 else
2244 buffer->flags &= ~RB_FL_OVERWRITE;
2245 mutex_unlock(&buffer->mutex);
2246}
2247EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
2248
2289d567 2249static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
7a8e76a3 2250{
044fa782 2251 return bpage->page->data + index;
7a8e76a3
SR
2252}
2253
2289d567 2254static __always_inline struct ring_buffer_event *
d769041f 2255rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 2256{
6f807acd
SR
2257 return __rb_page_index(cpu_buffer->reader_page,
2258 cpu_buffer->reader_page->read);
2259}
2260
785888c5 2261static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
7a8e76a3 2262{
785888c5 2263 return local_read(&bpage->page->commit);
7a8e76a3
SR
2264}
2265
785888c5
SRV
2266static struct ring_buffer_event *
2267rb_iter_head_event(struct ring_buffer_iter *iter)
bf41a158 2268{
785888c5
SRV
2269 struct ring_buffer_event *event;
2270 struct buffer_page *iter_head_page = iter->head_page;
2271 unsigned long commit;
2272 unsigned length;
2273
153368ce
SRV
2274 if (iter->head != iter->next_event)
2275 return iter->event;
2276
785888c5
SRV
2277 /*
2278 * When the writer goes across pages, it issues a cmpxchg which
2279 * is a mb(), which will synchronize with the rmb here.
2280 * (see rb_tail_page_update() and __rb_reserve_next())
2281 */
2282 commit = rb_page_commit(iter_head_page);
2283 smp_rmb();
2284 event = __rb_page_index(iter_head_page, iter->head);
2285 length = rb_event_length(event);
2286
2287 /*
2288 * READ_ONCE() doesn't work on functions and we don't want the
2289 * compiler doing any crazy optimizations with length.
2290 */
2291 barrier();
2292
2293 if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE)
2294 /* Writer corrupted the read? */
2295 goto reset;
2296
2297 memcpy(iter->event, event, length);
2298 /*
2299 * If the page stamp is still the same after this rmb() then the
2300 * event was safely copied without the writer entering the page.
2301 */
2302 smp_rmb();
2303
2304 /* Make sure the page didn't change since we read this */
2305 if (iter->page_stamp != iter_head_page->page->time_stamp ||
2306 commit > rb_page_commit(iter_head_page))
2307 goto reset;
2308
2309 iter->next_event = iter->head + length;
2310 return iter->event;
2311 reset:
2312 /* Reset to the beginning */
2313 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2314 iter->head = 0;
2315 iter->next_event = 0;
c9b7a4a7 2316 iter->missed_events = 1;
785888c5 2317 return NULL;
bf41a158
SR
2318}
2319
25985edc 2320/* Size is determined by what has been committed */
2289d567 2321static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
bf41a158
SR
2322{
2323 return rb_page_commit(bpage);
2324}
2325
2289d567 2326static __always_inline unsigned
bf41a158
SR
2327rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
2328{
2329 return rb_page_commit(cpu_buffer->commit_page);
2330}
2331
2289d567 2332static __always_inline unsigned
bf41a158
SR
2333rb_event_index(struct ring_buffer_event *event)
2334{
2335 unsigned long addr = (unsigned long)event;
2336
22f470f8 2337 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
bf41a158
SR
2338}
2339
34a148bf 2340static void rb_inc_iter(struct ring_buffer_iter *iter)
d769041f
SR
2341{
2342 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2343
2344 /*
2345 * The iterator could be on the reader page (it starts there).
2346 * But the head could have moved, since the reader was
2347 * found. Check for this case and assign the iterator
2348 * to the head page instead of next.
2349 */
2350 if (iter->head_page == cpu_buffer->reader_page)
77ae365e 2351 iter->head_page = rb_set_head_page(cpu_buffer);
d769041f 2352 else
6689bed3 2353 rb_inc_page(&iter->head_page);
d769041f 2354
28e3fc56 2355 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
7a8e76a3 2356 iter->head = 0;
785888c5 2357 iter->next_event = 0;
7a8e76a3
SR
2358}
2359
77ae365e
SR
2360/*
2361 * rb_handle_head_page - writer hit the head page
2362 *
2363 * Returns: +1 to retry page
2364 * 0 to continue
2365 * -1 on error
2366 */
2367static int
2368rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
2369 struct buffer_page *tail_page,
2370 struct buffer_page *next_page)
2371{
2372 struct buffer_page *new_head;
2373 int entries;
2374 int type;
2375 int ret;
2376
2377 entries = rb_page_entries(next_page);
2378
2379 /*
2380 * The hard part is here. We need to move the head
2381 * forward, and protect against both readers on
2382 * other CPUs and writers coming in via interrupts.
2383 */
2384 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
2385 RB_PAGE_HEAD);
2386
2387 /*
2388 * type can be one of four:
2389 * NORMAL - an interrupt already moved it for us
2390 * HEAD - we are the first to get here.
2391 * UPDATE - we are the interrupt interrupting
2392 * a current move.
2393 * MOVED - a reader on another CPU moved the next
2394 * pointer to its reader page. Give up
2395 * and try again.
2396 */
2397
2398 switch (type) {
2399 case RB_PAGE_HEAD:
2400 /*
2401 * We changed the head to UPDATE, thus
2402 * it is our responsibility to update
2403 * the counters.
2404 */
2405 local_add(entries, &cpu_buffer->overrun);
c64e148a 2406 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
77ae365e
SR
2407
2408 /*
2409 * The entries will be zeroed out when we move the
2410 * tail page.
2411 */
2412
2413 /* still more to do */
2414 break;
2415
2416 case RB_PAGE_UPDATE:
2417 /*
 2418 * This is an interrupt that interrupted the
2419 * previous update. Still more to do.
2420 */
2421 break;
2422 case RB_PAGE_NORMAL:
2423 /*
2424 * An interrupt came in before the update
2425 * and processed this for us.
2426 * Nothing left to do.
2427 */
2428 return 1;
2429 case RB_PAGE_MOVED:
2430 /*
2431 * The reader is on another CPU and just did
2432 * a swap with our next_page.
2433 * Try again.
2434 */
2435 return 1;
2436 default:
2437 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2438 return -1;
2439 }
2440
2441 /*
2442 * Now that we are here, the old head pointer is
2443 * set to UPDATE. This will keep the reader from
2444 * swapping the head page with the reader page.
2445 * The reader (on another CPU) will spin till
2446 * we are finished.
2447 *
2448 * We just need to protect against interrupts
2449 * doing the job. We will set the next pointer
2450 * to HEAD. After that, we set the old pointer
2451 * to NORMAL, but only if it was HEAD before.
2452 * otherwise we are an interrupt, and only
2453 * want the outer most commit to reset it.
2454 */
2455 new_head = next_page;
6689bed3 2456 rb_inc_page(&new_head);
77ae365e
SR
2457
2458 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2459 RB_PAGE_NORMAL);
2460
2461 /*
2462 * Valid returns are:
2463 * HEAD - an interrupt came in and already set it.
2464 * NORMAL - One of two things:
2465 * 1) We really set it.
2466 * 2) A bunch of interrupts came in and moved
2467 * the page forward again.
2468 */
2469 switch (ret) {
2470 case RB_PAGE_HEAD:
2471 case RB_PAGE_NORMAL:
2472 /* OK */
2473 break;
2474 default:
2475 RB_WARN_ON(cpu_buffer, 1);
2476 return -1;
2477 }
2478
2479 /*
2480 * It is possible that an interrupt came in,
2481 * set the head up, then more interrupts came in
2482 * and moved it again. When we get back here,
2483 * the page would have been set to NORMAL but we
2484 * just set it back to HEAD.
2485 *
2486 * How do you detect this? Well, if that happened
2487 * the tail page would have moved.
2488 */
2489 if (ret == RB_PAGE_NORMAL) {
8573636e
SRRH
2490 struct buffer_page *buffer_tail_page;
2491
2492 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
77ae365e
SR
2493 /*
 2494 * If the tail had moved past next, then we need
2495 * to reset the pointer.
2496 */
8573636e
SRRH
2497 if (buffer_tail_page != tail_page &&
2498 buffer_tail_page != next_page)
77ae365e
SR
2499 rb_head_page_set_normal(cpu_buffer, new_head,
2500 next_page,
2501 RB_PAGE_HEAD);
2502 }
2503
2504 /*
2505 * If this was the outer most commit (the one that
2506 * changed the original pointer from HEAD to UPDATE),
2507 * then it is up to us to reset it to NORMAL.
2508 */
2509 if (type == RB_PAGE_HEAD) {
2510 ret = rb_head_page_set_normal(cpu_buffer, next_page,
2511 tail_page,
2512 RB_PAGE_UPDATE);
2513 if (RB_WARN_ON(cpu_buffer,
2514 ret != RB_PAGE_UPDATE))
2515 return -1;
2516 }
2517
2518 return 0;
2519}
2520
c7b09308
SR
2521static inline void
2522rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
fcc742ea 2523 unsigned long tail, struct rb_event_info *info)
c7b09308 2524{
fcc742ea 2525 struct buffer_page *tail_page = info->tail_page;
c7b09308 2526 struct ring_buffer_event *event;
fcc742ea 2527 unsigned long length = info->length;
c7b09308
SR
2528
2529 /*
2530 * Only the event that crossed the page boundary
2531 * must fill the old tail_page with padding.
2532 */
2533 if (tail >= BUF_PAGE_SIZE) {
b3230c8b
SR
2534 /*
2535 * If the page was filled, then we still need
2536 * to update the real_end. Reset it to zero
2537 * and the reader will ignore it.
2538 */
2539 if (tail == BUF_PAGE_SIZE)
2540 tail_page->real_end = 0;
2541
c7b09308
SR
2542 local_sub(length, &tail_page->write);
2543 return;
2544 }
2545
2546 event = __rb_page_index(tail_page, tail);
2547
c64e148a
VN
2548 /* account for padding bytes */
2549 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2550
ff0ff84a
SR
2551 /*
2552 * Save the original length to the meta data.
2553 * This will be used by the reader to add lost event
2554 * counter.
2555 */
2556 tail_page->real_end = tail;
2557
c7b09308
SR
2558 /*
2559 * If this event is bigger than the minimum size, then
2560 * we need to be careful that we don't subtract the
2561 * write counter enough to allow another writer to slip
2562 * in on this page.
2563 * We put in a discarded commit instead, to make sure
2564 * that this space is not used again.
2565 *
2566 * If we are less than the minimum size, we don't need to
2567 * worry about it.
2568 */
2569 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2570 /* No room for any events */
2571
2572 /* Mark the rest of the page with padding */
2573 rb_event_set_padding(event);
2574
2575 /* Set the write back to the previous setting */
2576 local_sub(length, &tail_page->write);
2577 return;
2578 }
2579
2580 /* Put in a discarded event */
2581 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2582 event->type_len = RINGBUF_TYPE_PADDING;
2583 /* time delta must be non zero */
2584 event->time_delta = 1;
c7b09308
SR
2585
2586 /* Set write to end of buffer */
2587 length = (tail + length) - BUF_PAGE_SIZE;
2588 local_sub(length, &tail_page->write);
2589}
6634ff26 2590
4239c38f
SRRH
2591static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2592
747e94ae
SR
2593/*
2594 * This is the slow path, force gcc not to inline it.
2595 */
2596static noinline struct ring_buffer_event *
6634ff26 2597rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
fcc742ea 2598 unsigned long tail, struct rb_event_info *info)
7a8e76a3 2599{
fcc742ea 2600 struct buffer_page *tail_page = info->tail_page;
5a50e33c 2601 struct buffer_page *commit_page = cpu_buffer->commit_page;
13292494 2602 struct trace_buffer *buffer = cpu_buffer->buffer;
77ae365e
SR
2603 struct buffer_page *next_page;
2604 int ret;
aa20ae84
SR
2605
2606 next_page = tail_page;
2607
6689bed3 2608 rb_inc_page(&next_page);
aa20ae84 2609
aa20ae84
SR
2610 /*
2611 * If for some reason, we had an interrupt storm that made
2612 * it all the way around the buffer, bail, and warn
2613 * about it.
2614 */
2615 if (unlikely(next_page == commit_page)) {
77ae365e 2616 local_inc(&cpu_buffer->commit_overrun);
aa20ae84
SR
2617 goto out_reset;
2618 }
2619
77ae365e
SR
2620 /*
2621 * This is where the fun begins!
2622 *
2623 * We are fighting against races between a reader that
2624 * could be on another CPU trying to swap its reader
2625 * page with the buffer head.
2626 *
2627 * We are also fighting against interrupts coming in and
2628 * moving the head or tail on us as well.
2629 *
2630 * If the next page is the head page then we have filled
2631 * the buffer, unless the commit page is still on the
2632 * reader page.
2633 */
6689bed3 2634 if (rb_is_head_page(next_page, &tail_page->list)) {
aa20ae84 2635
77ae365e
SR
2636 /*
2637 * If the commit is not on the reader page, then
2638 * move the header page.
2639 */
2640 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2641 /*
2642 * If we are not in overwrite mode,
2643 * this is easy, just stop here.
2644 */
884bfe89
SP
2645 if (!(buffer->flags & RB_FL_OVERWRITE)) {
2646 local_inc(&cpu_buffer->dropped_events);
77ae365e 2647 goto out_reset;
884bfe89 2648 }
77ae365e
SR
2649
2650 ret = rb_handle_head_page(cpu_buffer,
2651 tail_page,
2652 next_page);
2653 if (ret < 0)
2654 goto out_reset;
2655 if (ret)
2656 goto out_again;
2657 } else {
2658 /*
2659 * We need to be careful here too. The
2660 * commit page could still be on the reader
2661 * page. We could have a small buffer, and
2662 * have filled up the buffer with events
2663 * from interrupts and such, and wrapped.
2664 *
c6358bac 2665 * Note, if the tail page is also on the
77ae365e
SR
2666 * reader_page, we let it move out.
2667 */
2668 if (unlikely((cpu_buffer->commit_page !=
2669 cpu_buffer->tail_page) &&
2670 (cpu_buffer->commit_page ==
2671 cpu_buffer->reader_page))) {
2672 local_inc(&cpu_buffer->commit_overrun);
2673 goto out_reset;
2674 }
aa20ae84
SR
2675 }
2676 }
2677
70004986 2678 rb_tail_page_update(cpu_buffer, tail_page, next_page);
aa20ae84 2679
77ae365e 2680 out_again:
aa20ae84 2681
fcc742ea 2682 rb_reset_tail(cpu_buffer, tail, info);
aa20ae84 2683
4239c38f
SRRH
2684 /* Commit what we have for now. */
2685 rb_end_commit(cpu_buffer);
2686 /* rb_end_commit() decs committing */
2687 local_inc(&cpu_buffer->committing);
2688
aa20ae84
SR
2689 /* fail and let the caller try again */
2690 return ERR_PTR(-EAGAIN);
2691
45141d46 2692 out_reset:
6f3b3440 2693 /* reset write */
fcc742ea 2694 rb_reset_tail(cpu_buffer, tail, info);
6f3b3440 2695
bf41a158 2696 return NULL;
7a8e76a3
SR
2697}
2698
74e87937
SRV
2699/* Slow path */
2700static struct ring_buffer_event *
dc4e2801 2701rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
9826b273 2702{
dc4e2801
TZ
2703 if (abs)
2704 event->type_len = RINGBUF_TYPE_TIME_STAMP;
2705 else
2706 event->type_len = RINGBUF_TYPE_TIME_EXTEND;
9826b273 2707
dc4e2801
TZ
2708 /* Not the first event on the page, or not delta? */
2709 if (abs || rb_event_index(event)) {
d90fd774
SRRH
2710 event->time_delta = delta & TS_MASK;
2711 event->array[0] = delta >> TS_SHIFT;
2712 } else {
2713 /* nope, just zero it */
2714 event->time_delta = 0;
2715 event->array[0] = 0;
2716 }
a4543a2f 2717
d90fd774
SRRH
2718 return skip_time_extend(event);
2719}
a4543a2f 2720
58fbc3c6
SRV
2721#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2722static inline bool sched_clock_stable(void)
2723{
2724 return true;
2725}
2726#endif
2727
74e87937 2728static void
58fbc3c6
SRV
2729rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2730 struct rb_event_info *info)
2731{
2732 u64 write_stamp;
2733
29ce2451 2734 WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s",
58fbc3c6
SRV
2735 (unsigned long long)info->delta,
2736 (unsigned long long)info->ts,
2737 (unsigned long long)info->before,
2738 (unsigned long long)info->after,
2739 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0),
2740 sched_clock_stable() ? "" :
2741 "If you just came from a suspend/resume,\n"
2742 "please switch to the trace global clock:\n"
2743 " echo global > /sys/kernel/debug/tracing/trace_clock\n"
2744 "or add trace_clock=global to the kernel command line\n");
2745}
2746
74e87937
SRV
2747static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2748 struct ring_buffer_event **event,
2749 struct rb_event_info *info,
2750 u64 *delta,
2751 unsigned int *length)
2752{
2753 bool abs = info->add_timestamp &
2754 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);
2755
29ce2451
SRV
2756 if (unlikely(info->delta > (1ULL << 59))) {
2757 /* did the clock go backwards */
2758 if (info->before == info->after && info->before > info->ts) {
2759 /* not interrupted */
2760 static int once;
2761
2762 /*
 2763 * This is possible with a recalibration of the TSC.
2764 * Do not produce a call stack, but just report it.
2765 */
2766 if (!once) {
2767 once++;
2768 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n",
2769 info->before, info->ts);
2770 }
2771 } else
2772 rb_check_timestamp(cpu_buffer, info);
2773 if (!abs)
2774 info->delta = 0;
2775 }
74e87937
SRV
2776 *event = rb_add_time_stamp(*event, info->delta, abs);
2777 *length -= RB_LEN_TIME_EXTEND;
2778 *delta = 0;
2779}
2780
d90fd774
SRRH
2781/**
2782 * rb_update_event - update event type and data
cfc585a4 2783 * @cpu_buffer: The per cpu buffer of the @event
d90fd774 2784 * @event: the event to update
cfc585a4 2785 * @info: The info to update the @event with (contains length and delta)
d90fd774 2786 *
cfc585a4 2787 * Update the type and data fields of the @event. The length
d90fd774
SRRH
2788 * is the actual size that is written to the ring buffer,
2789 * and with this, we can determine what to place into the
2790 * data field.
2791 */
b7dc42fd 2792static void
d90fd774
SRRH
2793rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2794 struct ring_buffer_event *event,
2795 struct rb_event_info *info)
2796{
2797 unsigned length = info->length;
2798 u64 delta = info->delta;
8672e494
SRV
2799 unsigned int nest = local_read(&cpu_buffer->committing) - 1;
2800
a948c69d 2801 if (!WARN_ON_ONCE(nest >= MAX_NEST))
8672e494 2802 cpu_buffer->event_stamp[nest] = info->ts;
a4543a2f
SRRH
2803
2804 /*
d90fd774 2805 * If we need to add a timestamp, then we
6167c205 2806 * add it to the start of the reserved space.
a4543a2f 2807 */
74e87937
SRV
2808 if (unlikely(info->add_timestamp))
2809 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length);
a4543a2f 2810
d90fd774
SRRH
2811 event->time_delta = delta;
2812 length -= RB_EVNT_HDR_SIZE;
adab66b7 2813 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
d90fd774
SRRH
2814 event->type_len = 0;
2815 event->array[0] = length;
2816 } else
2817 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2818}
2819
2820static unsigned rb_calculate_event_length(unsigned length)
2821{
2822 struct ring_buffer_event event; /* Used only for sizeof array */
2823
2824 /* zero length can cause confusions */
2825 if (!length)
2826 length++;
2827
adab66b7 2828 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
d90fd774
SRRH
2829 length += sizeof(event.array[0]);
2830
2831 length += RB_EVNT_HDR_SIZE;
adab66b7 2832 length = ALIGN(length, RB_ARCH_ALIGNMENT);
d90fd774
SRRH
2833
2834 /*
2835 * In case the time delta is larger than the 27 bits for it
2836 * in the header, we need to add a timestamp. If another
2837 * event comes in when trying to discard this one to increase
2838 * the length, then the timestamp will be added in the allocated
2839 * space of this event. If length is bigger than the size needed
2840 * for the TIME_EXTEND, then padding has to be used. The events
2841 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2842 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2843 * As length is a multiple of 4, we only need to worry if it
2844 * is 12 (RB_LEN_TIME_EXTEND + 4).
2845 */
2846 if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2847 length += RB_ALIGNMENT;
2848
2849 return length;
2850}
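/*
 * Worked example (illustrative, assuming a typical configuration with
 * RB_EVNT_HDR_SIZE == 4, RB_ALIGNMENT == 4, RB_LEN_TIME_EXTEND == 8 and no
 * forced 8-byte alignment): a request for 5 bytes of data becomes
 * 5 + 4 = 9, which aligns up to 12. That is exactly
 * RB_LEN_TIME_EXTEND + RB_ALIGNMENT, and a 12-byte event could not later be
 * split into an 8-byte TIME_EXTEND plus a padding event (minimum 8 bytes),
 * so the length is bumped to 16.
 */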
2851
a389d86f
SRV
2852static u64 rb_time_delta(struct ring_buffer_event *event)
2853{
2854 switch (event->type_len) {
2855 case RINGBUF_TYPE_PADDING:
2856 return 0;
2857
2858 case RINGBUF_TYPE_TIME_EXTEND:
e20044f7 2859 return rb_event_time_stamp(event);
a389d86f
SRV
2860
2861 case RINGBUF_TYPE_TIME_STAMP:
2862 return 0;
2863
2864 case RINGBUF_TYPE_DATA:
2865 return event->time_delta;
2866 default:
2867 return 0;
2868 }
d90fd774 2869}
d90fd774
SRRH
2870
2871static inline int
2872rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2873 struct ring_buffer_event *event)
2874{
2875 unsigned long new_index, old_index;
2876 struct buffer_page *bpage;
2877 unsigned long index;
2878 unsigned long addr;
a389d86f
SRV
2879 u64 write_stamp;
2880 u64 delta;
d90fd774
SRRH
2881
2882 new_index = rb_event_index(event);
2883 old_index = new_index + rb_event_ts_length(event);
2884 addr = (unsigned long)event;
2885 addr &= PAGE_MASK;
2886
8573636e 2887 bpage = READ_ONCE(cpu_buffer->tail_page);
d90fd774 2888
a389d86f
SRV
2889 delta = rb_time_delta(event);
2890
10464b4a
SRV
2891 if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp))
2892 return 0;
a389d86f
SRV
2893
2894 /* Make sure the write stamp is read before testing the location */
2895 barrier();
2896
d90fd774
SRRH
2897 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2898 unsigned long write_mask =
2899 local_read(&bpage->write) & ~RB_WRITE_MASK;
2900 unsigned long event_length = rb_event_length(event);
a389d86f 2901
a389d86f 2902 /* Something came in, can't discard */
10464b4a
SRV
2903 if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
2904 write_stamp, write_stamp - delta))
a389d86f
SRV
2905 return 0;
2906
6f6be606
SRV
2907 /*
2908 * It's possible that the event time delta is zero
2909 * (has the same time stamp as the previous event)
2910 * in which case write_stamp and before_stamp could
2911 * be the same. In such a case, force before_stamp
2912 * to be different than write_stamp. It doesn't
 2913 * matter what it is, as long as it's different.
2914 */
2915 if (!delta)
2916 rb_time_set(&cpu_buffer->before_stamp, 0);
2917
a389d86f
SRV
2918 /*
2919 * If an event were to come in now, it would see that the
2920 * write_stamp and the before_stamp are different, and assume
2921 * that this event just added itself before updating
2922 * the write stamp. The interrupting event will fix the
2923 * write stamp for us, and use the before stamp as its delta.
2924 */
2925
d90fd774
SRRH
2926 /*
2927 * This is on the tail page. It is possible that
2928 * a write could come in and move the tail page
2929 * and write to the next page. That is fine
2930 * because we just shorten what is on this page.
2931 */
2932 old_index += write_mask;
2933 new_index += write_mask;
2934 index = local_cmpxchg(&bpage->write, old_index, new_index);
2935 if (index == old_index) {
2936 /* update counters */
2937 local_sub(event_length, &cpu_buffer->entries_bytes);
2938 return 1;
2939 }
2940 }
2941
2942 /* could not discard */
2943 return 0;
2944}
2945
2946static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2947{
2948 local_inc(&cpu_buffer->committing);
2949 local_inc(&cpu_buffer->commits);
2950}
2951
38e11df1 2952static __always_inline void
d90fd774
SRRH
2953rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2954{
2955 unsigned long max_count;
2956
2957 /*
2958 * We only race with interrupts and NMIs on this CPU.
2959 * If we own the commit event, then we can commit
2960 * all others that interrupted us, since the interruptions
2961 * are in stack format (they finish before they come
2962 * back to us). This allows us to do a simple loop to
2963 * assign the commit to the tail.
2964 */
2965 again:
2966 max_count = cpu_buffer->nr_pages * 100;
2967
8573636e 2968 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
d90fd774
SRRH
2969 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
2970 return;
2971 if (RB_WARN_ON(cpu_buffer,
2972 rb_is_reader_page(cpu_buffer->tail_page)))
2973 return;
2974 local_set(&cpu_buffer->commit_page->page->commit,
2975 rb_page_write(cpu_buffer->commit_page));
6689bed3 2976 rb_inc_page(&cpu_buffer->commit_page);
d90fd774
SRRH
2977 /* add barrier to keep gcc from optimizing too much */
2978 barrier();
2979 }
2980 while (rb_commit_index(cpu_buffer) !=
2981 rb_page_write(cpu_buffer->commit_page)) {
2982
2983 local_set(&cpu_buffer->commit_page->page->commit,
2984 rb_page_write(cpu_buffer->commit_page));
2985 RB_WARN_ON(cpu_buffer,
2986 local_read(&cpu_buffer->commit_page->page->commit) &
2987 ~RB_WRITE_MASK);
2988 barrier();
2989 }
2990
2991 /* again, keep gcc from optimizing */
2992 barrier();
2993
2994 /*
2995 * If an interrupt came in just after the first while loop
2996 * and pushed the tail page forward, we will be left with
2997 * a dangling commit that will never go forward.
2998 */
8573636e 2999 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
d90fd774
SRRH
3000 goto again;
3001}
3002
38e11df1 3003static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
d90fd774
SRRH
3004{
3005 unsigned long commits;
3006
3007 if (RB_WARN_ON(cpu_buffer,
3008 !local_read(&cpu_buffer->committing)))
3009 return;
3010
3011 again:
3012 commits = local_read(&cpu_buffer->commits);
3013 /* synchronize with interrupts */
3014 barrier();
3015 if (local_read(&cpu_buffer->committing) == 1)
3016 rb_set_commit_to_write(cpu_buffer);
3017
3018 local_dec(&cpu_buffer->committing);
3019
3020 /* synchronize with interrupts */
3021 barrier();
3022
3023 /*
3024 * Need to account for interrupts coming in between the
3025 * updating of the commit page and the clearing of the
3026 * committing counter.
3027 */
3028 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
3029 !local_read(&cpu_buffer->committing)) {
3030 local_inc(&cpu_buffer->committing);
3031 goto again;
3032 }
3033}
3034
3035static inline void rb_event_discard(struct ring_buffer_event *event)
3036{
dc4e2801 3037 if (extended_time(event))
d90fd774
SRRH
3038 event = skip_time_extend(event);
3039
3040 /* array[0] holds the actual length for the discarded event */
3041 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
3042 event->type_len = RINGBUF_TYPE_PADDING;
3043 /* time delta must be non zero */
3044 if (!event->time_delta)
3045 event->time_delta = 1;
3046}
3047
d90fd774
SRRH
3048static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
3049 struct ring_buffer_event *event)
3050{
3051 local_inc(&cpu_buffer->entries);
d90fd774
SRRH
3052 rb_end_commit(cpu_buffer);
3053}
3054
3055static __always_inline void
13292494 3056rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
d90fd774 3057{
03329f99
SRV
3058 size_t nr_pages;
3059 size_t dirty;
3060 size_t full;
d90fd774
SRRH
3061
3062 if (buffer->irq_work.waiters_pending) {
3063 buffer->irq_work.waiters_pending = false;
 3064 /* irq_work_queue() supplies its own memory barriers */
3065 irq_work_queue(&buffer->irq_work.work);
3066 }
3067
3068 if (cpu_buffer->irq_work.waiters_pending) {
3069 cpu_buffer->irq_work.waiters_pending = false;
 3070 /* irq_work_queue() supplies its own memory barriers */
3071 irq_work_queue(&cpu_buffer->irq_work.work);
3072 }
3073
03329f99
SRV
3074 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
3075 return;
d90fd774 3076
03329f99
SRV
3077 if (cpu_buffer->reader_page == cpu_buffer->commit_page)
3078 return;
2c2b0a78 3079
03329f99
SRV
3080 if (!cpu_buffer->irq_work.full_waiters_pending)
3081 return;
2c2b0a78 3082
03329f99
SRV
3083 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
3084
3085 full = cpu_buffer->shortest_full;
3086 nr_pages = cpu_buffer->nr_pages;
3087 dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
3088 if (full && nr_pages && (dirty * 100) <= full * nr_pages)
3089 return;
3090
3091 cpu_buffer->irq_work.wakeup_full = true;
3092 cpu_buffer->irq_work.full_waiters_pending = false;
 3093 /* irq_work_queue() supplies its own memory barriers */
3094 irq_work_queue(&cpu_buffer->irq_work.work);
d90fd774
SRRH
3095}
3096
28575c61
SRV
3097#ifdef CONFIG_RING_BUFFER_RECORD_RECURSION
3098# define do_ring_buffer_record_recursion() \
3099 do_ftrace_record_recursion(_THIS_IP_, _RET_IP_)
3100#else
3101# define do_ring_buffer_record_recursion() do { } while (0)
3102#endif
3103
d90fd774
SRRH
3104/*
3105 * The lock and unlock are done within a preempt disable section.
3106 * The current_context per_cpu variable can only be modified
3107 * by the current task between lock and unlock. But it can
a0e3a18f
SRV
3108 * be modified more than once via an interrupt. To pass this
3109 * information from the lock to the unlock without having to
3110 * access the 'in_interrupt()' functions again (which do show
 3111 * a bit of overhead in something as critical as function tracing),
3112 * we use a bitmask trick.
d90fd774 3113 *
b02414c8
SRV
3114 * bit 1 = NMI context
3115 * bit 2 = IRQ context
3116 * bit 3 = SoftIRQ context
3117 * bit 4 = normal context.
d90fd774 3118 *
a0e3a18f
SRV
3119 * This works because this is the order of contexts that can
3120 * preempt other contexts. A SoftIRQ never preempts an IRQ
3121 * context.
3122 *
3123 * When the context is determined, the corresponding bit is
3124 * checked and set (if it was set, then a recursion of that context
3125 * happened).
3126 *
3127 * On unlock, we need to clear this bit. To do so, just subtract
3128 * 1 from the current_context and AND it to itself.
3129 *
3130 * (binary)
3131 * 101 - 1 = 100
3132 * 101 & 100 = 100 (clearing bit zero)
3133 *
3134 * 1010 - 1 = 1001
3135 * 1010 & 1001 = 1000 (clearing bit 1)
3136 *
3137 * The least significant bit can be cleared this way, and it
3138 * just so happens that it is the same bit corresponding to
3139 * the current context.
b02414c8
SRV
3140 *
3141 * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
3142 * is set when a recursion is detected at the current context, and if
3143 * the TRANSITION bit is already set, it will fail the recursion.
3144 * This is needed because there's a lag between the changing of
3145 * interrupt context and updating the preempt count. In this case,
3146 * a false positive will be found. To handle this, one extra recursion
3147 * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
3148 * bit is already set, then it is considered a recursion and the function
3149 * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
3150 *
3151 * On the trace_recursive_unlock(), the TRANSITION bit will be the first
3152 * to be cleared. Even if it wasn't the context that set it. That is,
3153 * if an interrupt comes in while NORMAL bit is set and the ring buffer
3154 * is called before preempt_count() is updated, since the check will
3155 * be on the NORMAL bit, the TRANSITION bit will then be set. If an
3156 * NMI then comes in, it will set the NMI bit, but when the NMI code
f2cc020d 3157 * does the trace_recursive_unlock() it will clear the TRANSITION bit
b02414c8
SRV
3158 * and leave the NMI bit set. But this is fine, because the interrupt
3159 * code that set the TRANSITION bit will then clear the NMI bit when it
3160 * calls trace_recursive_unlock(). If another NMI comes in, it will
3161 * set the TRANSITION bit and continue.
3162 *
3163 * Note: The TRANSITION bit only handles a single transition between context.
d90fd774
SRRH
3164 */
3165
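/*
 * Illustrative sketch, not part of ring_buffer.c: with nest == 0 the unlock
 * below reduces to the classic "clear the lowest set bit" step,
 * val &= (val - 1). Because deeper contexts own lower bits, the lowest set
 * bit is the one set most recently, which is exactly what the unlock needs
 * to drop.
 */
static inline unsigned int example_clear_lowest_bit(unsigned int val)
{
	return val & (val - 1);	/* clears only the least significant set bit */
}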
3166static __always_inline int
3167trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
3168{
a0e3a18f
SRV
3169 unsigned int val = cpu_buffer->current_context;
3170 unsigned long pc = preempt_count();
3171 int bit;
3172
3173 if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
3174 bit = RB_CTX_NORMAL;
3175 else
3176 bit = pc & NMI_MASK ? RB_CTX_NMI :
0164e0d7 3177 pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
a0e3a18f 3178
b02414c8
SRV
3179 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
3180 /*
3181 * It is possible that this was called by transitioning
3182 * between interrupt context, and preempt_count() has not
3183 * been updated yet. In this case, use the TRANSITION bit.
3184 */
3185 bit = RB_CTX_TRANSITION;
28575c61
SRV
3186 if (val & (1 << (bit + cpu_buffer->nest))) {
3187 do_ring_buffer_record_recursion();
b02414c8 3188 return 1;
28575c61 3189 }
b02414c8 3190 }
d90fd774 3191
8e012066 3192 val |= (1 << (bit + cpu_buffer->nest));
a0e3a18f 3193 cpu_buffer->current_context = val;
d90fd774
SRRH
3194
3195 return 0;
3196}
3197
3198static __always_inline void
3199trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
3200{
8e012066
SRV
3201 cpu_buffer->current_context &=
3202 cpu_buffer->current_context - (1 << cpu_buffer->nest);
3203}
3204
b02414c8
SRV
3205/* The recursive locking above uses 5 bits */
3206#define NESTED_BITS 5
8e012066
SRV
3207
3208/**
3209 * ring_buffer_nest_start - Allow to trace while nested
3210 * @buffer: The ring buffer to modify
3211 *
6167c205 3212 * The ring buffer has a safety mechanism to prevent recursion.
8e012066
SRV
3213 * But there may be a case where a trace needs to be done while
3214 * tracing something else. In this case, calling this function
 3215 * will allow another ring_buffer_lock_reserve() to nest within the currently active
3216 * ring_buffer_lock_reserve().
3217 *
3218 * Call this function before calling another ring_buffer_lock_reserve() and
3219 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
3220 */
13292494 3221void ring_buffer_nest_start(struct trace_buffer *buffer)
8e012066
SRV
3222{
3223 struct ring_buffer_per_cpu *cpu_buffer;
3224 int cpu;
3225
3226 /* Enabled by ring_buffer_nest_end() */
3227 preempt_disable_notrace();
3228 cpu = raw_smp_processor_id();
3229 cpu_buffer = buffer->buffers[cpu];
6167c205 3230 /* This is the shift value for the above recursive locking */
8e012066
SRV
3231 cpu_buffer->nest += NESTED_BITS;
3232}
3233
3234/**
3235 * ring_buffer_nest_end - Allow to trace while nested
3236 * @buffer: The ring buffer to modify
3237 *
3238 * Must be called after ring_buffer_nest_start() and after the
3239 * ring_buffer_unlock_commit().
3240 */
13292494 3241void ring_buffer_nest_end(struct trace_buffer *buffer)
8e012066
SRV
3242{
3243 struct ring_buffer_per_cpu *cpu_buffer;
3244 int cpu;
3245
3246 /* disabled by ring_buffer_nest_start() */
3247 cpu = raw_smp_processor_id();
3248 cpu_buffer = buffer->buffers[cpu];
6167c205 3249 /* This is the shift value for the above recursive locking */
8e012066
SRV
3250 cpu_buffer->nest -= NESTED_BITS;
3251 preempt_enable_notrace();
d90fd774
SRRH
3252}
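/*
 * Illustrative sketch, not part of ring_buffer.c: writing a nested event
 * while an outer ring_buffer_lock_reserve() on the same buffer is still
 * active. The integer payload is an assumption for the example only.
 */
static void example_nested_event(struct trace_buffer *buffer, int payload)
{
	struct ring_buffer_event *event;

	ring_buffer_nest_start(buffer);
	event = ring_buffer_lock_reserve(buffer, sizeof(payload));
	if (event) {
		memcpy(ring_buffer_event_data(event), &payload, sizeof(payload));
		ring_buffer_unlock_commit(buffer, event);
	}
	ring_buffer_nest_end(buffer);
}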
3253
3254/**
 3255 * ring_buffer_unlock_commit - commit a reserved event
3256 * @buffer: The buffer to commit to
3257 * @event: The event pointer to commit.
3258 *
3259 * This commits the data to the ring buffer, and releases any locks held.
3260 *
3261 * Must be paired with ring_buffer_lock_reserve.
3262 */
13292494 3263int ring_buffer_unlock_commit(struct trace_buffer *buffer,
d90fd774
SRRH
3264 struct ring_buffer_event *event)
3265{
3266 struct ring_buffer_per_cpu *cpu_buffer;
3267 int cpu = raw_smp_processor_id();
3268
3269 cpu_buffer = buffer->buffers[cpu];
3270
3271 rb_commit(cpu_buffer, event);
3272
3273 rb_wakeups(buffer, cpu_buffer);
3274
3275 trace_recursive_unlock(cpu_buffer);
3276
3277 preempt_enable_notrace();
3278
3279 return 0;
3280}
3281EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
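/*
 * Illustrative sketch, not part of ring_buffer.c: the pairing described in
 * the kernel-doc above. ring_buffer_lock_reserve() disables preemption and
 * returns the reserved event (or NULL); ring_buffer_unlock_commit()
 * publishes it and re-enables preemption. The example_record layout is an
 * assumption for the example only.
 */
struct example_record {
	u64	ts;
	int	value;
};

static int example_write_record(struct trace_buffer *buffer, int value)
{
	struct ring_buffer_event *event;
	struct example_record *rec;

	event = ring_buffer_lock_reserve(buffer, sizeof(*rec));
	if (!event)
		return -EBUSY;

	rec = ring_buffer_event_data(event);
	rec->ts = sched_clock();
	rec->value = value;

	return ring_buffer_unlock_commit(buffer, event);
}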
3282
5b7be9c7
SRV
3283/* Special value to validate all deltas on a page. */
3284#define CHECK_FULL_PAGE 1L
3285
3286#ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
3287static void dump_buffer_page(struct buffer_data_page *bpage,
3288 struct rb_event_info *info,
3289 unsigned long tail)
3290{
3291 struct ring_buffer_event *event;
3292 u64 ts, delta;
3293 int e;
3294
3295 ts = bpage->time_stamp;
3296 pr_warn(" [%lld] PAGE TIME STAMP\n", ts);
3297
3298 for (e = 0; e < tail; e += rb_event_length(event)) {
3299
3300 event = (struct ring_buffer_event *)(bpage->data + e);
3301
3302 switch (event->type_len) {
3303
3304 case RINGBUF_TYPE_TIME_EXTEND:
e20044f7 3305 delta = rb_event_time_stamp(event);
5b7be9c7
SRV
3306 ts += delta;
3307 pr_warn(" [%lld] delta:%lld TIME EXTEND\n", ts, delta);
3308 break;
3309
3310 case RINGBUF_TYPE_TIME_STAMP:
e20044f7 3311 delta = rb_event_time_stamp(event);
5b7be9c7
SRV
3312 ts = delta;
3313 pr_warn(" [%lld] absolute:%lld TIME STAMP\n", ts, delta);
3314 break;
3315
3316 case RINGBUF_TYPE_PADDING:
3317 ts += event->time_delta;
3318 pr_warn(" [%lld] delta:%d PADDING\n", ts, event->time_delta);
3319 break;
3320
3321 case RINGBUF_TYPE_DATA:
3322 ts += event->time_delta;
3323 pr_warn(" [%lld] delta:%d\n", ts, event->time_delta);
3324 break;
3325
3326 default:
3327 break;
3328 }
3329 }
3330}
3331
3332static DEFINE_PER_CPU(atomic_t, checking);
3333static atomic_t ts_dump;
3334
3335/*
3336 * Check if the current event time stamp matches the deltas on
3337 * the buffer page.
3338 */
3339static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3340 struct rb_event_info *info,
3341 unsigned long tail)
3342{
3343 struct ring_buffer_event *event;
3344 struct buffer_data_page *bpage;
3345 u64 ts, delta;
3346 bool full = false;
3347 int e;
3348
3349 bpage = info->tail_page->page;
3350
3351 if (tail == CHECK_FULL_PAGE) {
3352 full = true;
3353 tail = local_read(&bpage->commit);
3354 } else if (info->add_timestamp &
3355 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
3356 /* Ignore events with absolute time stamps */
3357 return;
3358 }
3359
3360 /*
3361 * Do not check the first event (skip possible extends too).
3362 * Also do not check if previous events have not been committed.
3363 */
3364 if (tail <= 8 || tail > local_read(&bpage->commit))
3365 return;
3366
3367 /*
 3368 * If this interrupted another event, skip the check.
3369 */
3370 if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
3371 goto out;
3372
3373 ts = bpage->time_stamp;
3374
3375 for (e = 0; e < tail; e += rb_event_length(event)) {
3376
3377 event = (struct ring_buffer_event *)(bpage->data + e);
3378
3379 switch (event->type_len) {
3380
3381 case RINGBUF_TYPE_TIME_EXTEND:
e20044f7 3382 delta = rb_event_time_stamp(event);
5b7be9c7
SRV
3383 ts += delta;
3384 break;
3385
3386 case RINGBUF_TYPE_TIME_STAMP:
e20044f7 3387 delta = rb_event_time_stamp(event);
5b7be9c7
SRV
3388 ts = delta;
3389 break;
3390
3391 case RINGBUF_TYPE_PADDING:
3392 if (event->time_delta == 1)
3393 break;
957cdcd9 3394 fallthrough;
5b7be9c7
SRV
3395 case RINGBUF_TYPE_DATA:
3396 ts += event->time_delta;
3397 break;
3398
3399 default:
3400 RB_WARN_ON(cpu_buffer, 1);
3401 }
3402 }
3403 if ((full && ts > info->ts) ||
3404 (!full && ts + info->delta != info->ts)) {
3405 /* If another report is happening, ignore this one */
3406 if (atomic_inc_return(&ts_dump) != 1) {
3407 atomic_dec(&ts_dump);
3408 goto out;
3409 }
3410 atomic_inc(&cpu_buffer->record_disabled);
6549de1f
SRV
 3411 /* There are some cases in boot up where this can happen */
3412 WARN_ON_ONCE(system_state != SYSTEM_BOOTING);
3413 pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s\n",
3414 cpu_buffer->cpu,
3415 ts + info->delta, info->ts, info->delta,
3416 info->before, info->after,
3417 full ? " (full)" : "");
5b7be9c7
SRV
3418 dump_buffer_page(bpage, info, tail);
3419 atomic_dec(&ts_dump);
3420 /* Do not re-enable checking */
3421 return;
3422 }
3423out:
3424 atomic_dec(this_cpu_ptr(&checking));
3425}
3426#else
3427static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3428 struct rb_event_info *info,
3429 unsigned long tail)
3430{
3431}
3432#endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */
3433
6634ff26
SR
3434static struct ring_buffer_event *
3435__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
fcc742ea 3436 struct rb_event_info *info)
6634ff26 3437{
6634ff26 3438 struct ring_buffer_event *event;
fcc742ea 3439 struct buffer_page *tail_page;
a389d86f 3440 unsigned long tail, write, w;
10464b4a
SRV
3441 bool a_ok;
3442 bool b_ok;
69d1b839 3443
8573636e
SRRH
3444 /* Don't let the compiler play games with cpu_buffer->tail_page */
3445 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
a389d86f
SRV
3446
3447 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK;
3448 barrier();
58fbc3c6
SRV
3449 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
3450 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
a389d86f
SRV
3451 barrier();
3452 info->ts = rb_time_stamp(cpu_buffer->buffer);
3453
58fbc3c6 3454 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
a389d86f 3455 info->delta = info->ts;
a389d86f 3456 } else {
58fbc3c6
SRV
3457 /*
3458 * If interrupting an event time update, we may need an
3459 * absolute timestamp.
3460 * Don't bother if this is the start of a new page (w == 0).
3461 */
3462 if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) {
3463 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
3464 info->length += RB_LEN_TIME_EXTEND;
3465 } else {
3466 info->delta = info->ts - info->after;
3467 if (unlikely(test_time_stamp(info->delta))) {
3468 info->add_timestamp |= RB_ADD_STAMP_EXTEND;
3469 info->length += RB_LEN_TIME_EXTEND;
3470 }
10464b4a 3471 }
7c4b4a51 3472 }
b7dc42fd 3473
10464b4a 3474 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts);
a389d86f
SRV
3475
3476 /*C*/ write = local_add_return(info->length, &tail_page->write);
77ae365e
SR
3477
3478 /* set write to only the index of the write */
3479 write &= RB_WRITE_MASK;
a389d86f 3480
fcc742ea 3481 tail = write - info->length;
6634ff26 3482
a389d86f
SRV
3483 /* See if we shot past the end of this buffer page */
3484 if (unlikely(write > BUF_PAGE_SIZE)) {
68e10d5f
SRV
3485 /* before and after may now be different, fix it up */
3486 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
3487 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3488 if (a_ok && b_ok && info->before != info->after)
3489 (void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
3490 info->before, info->after);
5b7be9c7
SRV
3491 if (a_ok && b_ok)
3492 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
a389d86f
SRV
3493 return rb_move_tail(cpu_buffer, tail, info);
3494 }
3495
3496 if (likely(tail == w)) {
3497 u64 save_before;
10464b4a 3498 bool s_ok;
a389d86f
SRV
3499
3500 /* Nothing interrupted us between A and C */
10464b4a 3501 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts);
a389d86f 3502 barrier();
10464b4a
SRV
3503 /*E*/ s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before);
3504 RB_WARN_ON(cpu_buffer, !s_ok);
7c4b4a51
SRV
3505 if (likely(!(info->add_timestamp &
3506 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
a389d86f 3507 /* This did not interrupt any time update */
58fbc3c6 3508 info->delta = info->ts - info->after;
a389d86f 3509 else
82db909e 3510 /* Just use full timestamp for interrupting event */
a389d86f
SRV
3511 info->delta = info->ts;
3512 barrier();
5b7be9c7 3513 check_buffer(cpu_buffer, info, tail);
a389d86f
SRV
3514 if (unlikely(info->ts != save_before)) {
3515 /* SLOW PATH - Interrupted between C and E */
3516
58fbc3c6 3517 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
10464b4a
SRV
3518 RB_WARN_ON(cpu_buffer, !a_ok);
3519
a389d86f 3520 /* Write stamp must only go forward */
58fbc3c6 3521 if (save_before > info->after) {
a389d86f
SRV
3522 /*
3523 * We do not care about the result, only that
3524 * it gets updated atomically.
3525 */
58fbc3c6
SRV
3526 (void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
3527 info->after, save_before);
a389d86f
SRV
3528 }
3529 }
3530 } else {
3531 u64 ts;
3532 /* SLOW PATH - Interrupted between A and C */
58fbc3c6 3533 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
10464b4a
SRV
3534 /* Was interrupted before here, write_stamp must be valid */
3535 RB_WARN_ON(cpu_buffer, !a_ok);
a389d86f
SRV
3536 ts = rb_time_stamp(cpu_buffer->buffer);
3537 barrier();
3538 /*E*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
8785f51a
AR
3539 info->after < ts &&
3540 rb_time_cmpxchg(&cpu_buffer->write_stamp,
3541 info->after, ts)) {
a389d86f 3542 /* Nothing came after this event between C and E */
58fbc3c6 3543 info->delta = ts - info->after;
a389d86f
SRV
3544 } else {
3545 /*
82db909e 3546 * Interrupted between C and E:
a389d86f
SRV
3547 * Lost the previous event's time stamp. Just set the
3548 * delta to zero, and this event will have the same time
3549 * stamp as the event it interrupted. The events that
3550 * come after this one will still be correct (as they
3551 * would have built their deltas on the previous event).
3552 */
3553 info->delta = 0;
3554 }
8672e494 3555 info->ts = ts;
7c4b4a51 3556 info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
a389d86f
SRV
3557 }
3558
6634ff26 3559 /*
a4543a2f 3560 * If this is the first commit on the page, then it has the same
b7dc42fd 3561 * timestamp as the page itself.
6634ff26 3562 */
7c4b4a51
SRV
3563 if (unlikely(!tail && !(info->add_timestamp &
3564 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
a4543a2f
SRRH
3565 info->delta = 0;
3566
b7dc42fd
SRRH
3567 /* We reserved something on the buffer */
3568
3569 event = __rb_page_index(tail_page, tail);
a4543a2f
SRRH
3570 rb_update_event(cpu_buffer, event, info);
3571
3572 local_inc(&tail_page->entries);
6634ff26 3573
b7dc42fd
SRRH
3574 /*
3575 * If this is the first commit on the page, then update
3576 * its timestamp.
3577 */
75b21c6d 3578 if (unlikely(!tail))
b7dc42fd
SRRH
3579 tail_page->page->time_stamp = info->ts;
3580
c64e148a 3581 /* account for these added bytes */
fcc742ea 3582 local_add(info->length, &cpu_buffer->entries_bytes);
c64e148a 3583
6634ff26
SR
3584 return event;
3585}
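/*
 * Editorial summary (not in the original source) of the A-E ordering
 * used in __rb_reserve_next() above, as read from the code:
 *
 *   A: w = tail_page->write            snapshot the write index
 *   B: before_stamp = info->ts         published before reserving
 *   C: write += info->length           the actual reservation
 *   D: write_stamp = info->ts          fast path only (tail == w)
 *   E: re-read before_stamp (fast path) or tail_page->write (slow
 *      path) to detect a writer that interrupted us after C
 *
 * If tail != w, an interrupt reserved its own event between A and C;
 * the slow path then either recomputes the delta from a fresh
 * timestamp or falls back to a delta of 0.
 */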
3586
fa7ffb39 3587static __always_inline struct ring_buffer_event *
13292494 3588rb_reserve_next_event(struct trace_buffer *buffer,
62f0b3eb 3589 struct ring_buffer_per_cpu *cpu_buffer,
1cd8d735 3590 unsigned long length)
7a8e76a3
SR
3591{
3592 struct ring_buffer_event *event;
fcc742ea 3593 struct rb_event_info info;
818e3dd3 3594 int nr_loops = 0;
58fbc3c6 3595 int add_ts_default;
7a8e76a3 3596
fa743953 3597 rb_start_commit(cpu_buffer);
a389d86f 3598 /* The commit page can not change after this */
fa743953 3599
85bac32c 3600#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
62f0b3eb
SR
3601 /*
3602 * Due to the ability to swap a cpu buffer between buffers,
3603 * it is possible it was swapped before we committed
3604 * (committing stops a swap). We check for it here and,
3605 * if it happened, we have to fail the write.
3606 */
3607 barrier();
6aa7de05 3608 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
62f0b3eb
SR
3609 local_dec(&cpu_buffer->committing);
3610 local_dec(&cpu_buffer->commits);
3611 return NULL;
3612 }
85bac32c 3613#endif
b7dc42fd 3614
fcc742ea 3615 info.length = rb_calculate_event_length(length);
58fbc3c6
SRV
3616
3617 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
3618 add_ts_default = RB_ADD_STAMP_ABSOLUTE;
3619 info.length += RB_LEN_TIME_EXTEND;
3620 } else {
3621 add_ts_default = RB_ADD_STAMP_NONE;
3622 }
3623
a4543a2f 3624 again:
58fbc3c6 3625 info.add_timestamp = add_ts_default;
b7dc42fd
SRRH
3626 info.delta = 0;
3627
818e3dd3
SR
3628 /*
3629 * We allow for interrupts to reenter here and do a trace.
3630 * If one does, it will cause this original code to loop
3631 * back here. Even with heavy interrupts happening, this
3632 * should only happen a few times in a row. If this happens
3633 * 1000 times in a row, there must be either an interrupt
3634 * storm or we have something buggy.
3635 * Bail!
3636 */
3e89c7bb 3637 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
fa743953 3638 goto out_fail;
818e3dd3 3639
fcc742ea
SRRH
3640 event = __rb_reserve_next(cpu_buffer, &info);
3641
bd1b7cd3 3642 if (unlikely(PTR_ERR(event) == -EAGAIN)) {
58fbc3c6 3643 if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND))
bd1b7cd3 3644 info.length -= RB_LEN_TIME_EXTEND;
bf41a158 3645 goto again;
bd1b7cd3 3646 }
bf41a158 3647
a389d86f
SRV
3648 if (likely(event))
3649 return event;
fa743953
SR
3650 out_fail:
3651 rb_end_commit(cpu_buffer);
3652 return NULL;
7a8e76a3
SR
3653}
3654
3655/**
3656 * ring_buffer_lock_reserve - reserve a part of the buffer
3657 * @buffer: the ring buffer to reserve from
3658 * @length: the length of the data to reserve (excluding event header)
7a8e76a3 3659 *
6167c205 3660 * Returns a reserved event on the ring buffer to copy directly to.
7a8e76a3
SR
3661 * The user of this interface will need to get the body to write into
3662 * and can use the ring_buffer_event_data() interface.
3663 *
3664 * The length is the length of the data needed, not the event length
3665 * which also includes the event header.
3666 *
3667 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
3668 * If NULL is returned, then nothing has been allocated or locked.
3669 */
3670struct ring_buffer_event *
13292494 3671ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
7a8e76a3
SR
3672{
3673 struct ring_buffer_per_cpu *cpu_buffer;
3674 struct ring_buffer_event *event;
5168ae50 3675 int cpu;
7a8e76a3 3676
bf41a158 3677 /* If we are tracing schedule, we don't want to recurse */
5168ae50 3678 preempt_disable_notrace();
bf41a158 3679
3205f806 3680 if (unlikely(atomic_read(&buffer->record_disabled)))
58a09ec6 3681 goto out;
261842b7 3682
7a8e76a3
SR
3683 cpu = raw_smp_processor_id();
3684
3205f806 3685 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
d769041f 3686 goto out;
7a8e76a3
SR
3687
3688 cpu_buffer = buffer->buffers[cpu];
7a8e76a3 3689
3205f806 3690 if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
d769041f 3691 goto out;
7a8e76a3 3692
3205f806 3693 if (unlikely(length > BUF_MAX_DATA_SIZE))
bf41a158 3694 goto out;
7a8e76a3 3695
58a09ec6
SRRH
3696 if (unlikely(trace_recursive_lock(cpu_buffer)))
3697 goto out;
3698
62f0b3eb 3699 event = rb_reserve_next_event(buffer, cpu_buffer, length);
7a8e76a3 3700 if (!event)
58a09ec6 3701 goto out_unlock;
7a8e76a3
SR
3702
3703 return event;
3704
58a09ec6
SRRH
3705 out_unlock:
3706 trace_recursive_unlock(cpu_buffer);
d769041f 3707 out:
5168ae50 3708 preempt_enable_notrace();
7a8e76a3
SR
3709 return NULL;
3710}
c4f50183 3711EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
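/*
 * Editorial usage sketch (not part of this file): the reserve/commit
 * pairing described in the kernel-doc above. The struct my_event
 * layout and the helper name are hypothetical;
 * ring_buffer_unlock_commit() is used in its two-argument form as
 * found in this kernel version.
 */
struct my_event {
	int		cpu;
	unsigned long	value;
};

static int my_trace_value(struct trace_buffer *buffer, unsigned long value)
{
	struct ring_buffer_event *event;
	struct my_event *entry;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return -EBUSY;	/* disabled, recursion, or no space */

	entry = ring_buffer_event_data(event);
	entry->cpu = raw_smp_processor_id();
	entry->value = value;

	/* Pairs with the reserve above; must not be skipped on success */
	return ring_buffer_unlock_commit(buffer, event);
}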
7a8e76a3 3712
a1863c21
SR
3713/*
3714 * Decrement the entries to the page that an event is on.
3715 * The event does not even need to exist, only the pointer
3716 * to the page it is on. This may only be called before the commit
3717 * takes place.
3718 */
3719static inline void
3720rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
3721 struct ring_buffer_event *event)
3722{
3723 unsigned long addr = (unsigned long)event;
3724 struct buffer_page *bpage = cpu_buffer->commit_page;
3725 struct buffer_page *start;
3726
3727 addr &= PAGE_MASK;
3728
3729 /* Do the likely case first */
3730 if (likely(bpage->page == (void *)addr)) {
3731 local_dec(&bpage->entries);
3732 return;
3733 }
3734
3735 /*
3736 * Because the commit page may be on the reader page we
3737 * start with the next page and check the end loop there.
3738 */
6689bed3 3739 rb_inc_page(&bpage);
a1863c21
SR
3740 start = bpage;
3741 do {
3742 if (bpage->page == (void *)addr) {
3743 local_dec(&bpage->entries);
3744 return;
3745 }
6689bed3 3746 rb_inc_page(&bpage);
a1863c21
SR
3747 } while (bpage != start);
3748
3749 /* commit not part of this buffer?? */
3750 RB_WARN_ON(cpu_buffer, 1);
3751}
3752
fa1b47dd 3753/**
88883490 3754 * ring_buffer_discard_commit - discard an event that has not been committed
fa1b47dd
SR
3755 * @buffer: the ring buffer
3756 * @event: non committed event to discard
3757 *
dc892f73
SR
3758 * Sometimes an event that is in the ring buffer needs to be ignored.
3759 * This function lets the user discard an event in the ring buffer
3760 * and then that event will not be read later.
3761 *
6167c205 3762 * This function only works if it is called before the item has been
dc892f73 3763 * committed. It will try to free the event from the ring buffer
fa1b47dd
SR
3764 * if another event has not been added behind it.
3765 *
3766 * If another event has been added behind it, it will set the event
3767 * up as discarded, and perform the commit.
3768 *
3769 * If this function is called, do not call ring_buffer_unlock_commit on
3770 * the event.
3771 */
13292494 3772void ring_buffer_discard_commit(struct trace_buffer *buffer,
fa1b47dd
SR
3773 struct ring_buffer_event *event)
3774{
3775 struct ring_buffer_per_cpu *cpu_buffer;
fa1b47dd
SR
3776 int cpu;
3777
3778 /* The event is discarded regardless */
f3b9aae1 3779 rb_event_discard(event);
fa1b47dd 3780
fa743953
SR
3781 cpu = smp_processor_id();
3782 cpu_buffer = buffer->buffers[cpu];
3783
fa1b47dd
SR
3784 /*
3785 * This must only be called if the event has not been
3786 * committed yet. Thus we can assume that preemption
3787 * is still disabled.
3788 */
fa743953 3789 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
fa1b47dd 3790
a1863c21 3791 rb_decrement_entry(cpu_buffer, event);
0f2541d2 3792 if (rb_try_to_discard(cpu_buffer, event))
edd813bf 3793 goto out;
fa1b47dd 3794
fa1b47dd 3795 out:
fa743953 3796 rb_end_commit(cpu_buffer);
fa1b47dd 3797
58a09ec6 3798 trace_recursive_unlock(cpu_buffer);
f3b9aae1 3799
5168ae50 3800 preempt_enable_notrace();
fa1b47dd
SR
3801
3802}
3803EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
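/*
 * Editorial sketch (not part of this file): discarding instead of
 * committing when a post-reservation filter rejects the event, per the
 * kernel-doc above. The filter condition is hypothetical.
 */
static void my_trace_filtered(struct trace_buffer *buffer, int value)
{
	struct ring_buffer_event *event;
	int *entry;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	*entry = value;

	if (value < 0)
		/* Discards; do not also call ring_buffer_unlock_commit() */
		ring_buffer_discard_commit(buffer, event);
	else
		ring_buffer_unlock_commit(buffer, event);
}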
3804
7a8e76a3
SR
3805/**
3806 * ring_buffer_write - write data to the buffer without reserving
3807 * @buffer: The ring buffer to write to.
3808 * @length: The length of the data being written (excluding the event header)
3809 * @data: The data to write to the buffer.
3810 *
3811 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
3812 * one function. If you already have the data to write to the buffer, it
3813 * may be easier to simply call this function.
3814 *
3815 * Note, like ring_buffer_lock_reserve, the length is the length of the data
3816 * and not the length of the event which would hold the header.
3817 */
13292494 3818int ring_buffer_write(struct trace_buffer *buffer,
01e3e710
DS
3819 unsigned long length,
3820 void *data)
7a8e76a3
SR
3821{
3822 struct ring_buffer_per_cpu *cpu_buffer;
3823 struct ring_buffer_event *event;
7a8e76a3
SR
3824 void *body;
3825 int ret = -EBUSY;
5168ae50 3826 int cpu;
7a8e76a3 3827
5168ae50 3828 preempt_disable_notrace();
bf41a158 3829
52fbe9cd
LJ
3830 if (atomic_read(&buffer->record_disabled))
3831 goto out;
3832
7a8e76a3
SR
3833 cpu = raw_smp_processor_id();
3834
9e01c1b7 3835 if (!cpumask_test_cpu(cpu, buffer->cpumask))
d769041f 3836 goto out;
7a8e76a3
SR
3837
3838 cpu_buffer = buffer->buffers[cpu];
7a8e76a3
SR
3839
3840 if (atomic_read(&cpu_buffer->record_disabled))
3841 goto out;
3842
be957c44
SR
3843 if (length > BUF_MAX_DATA_SIZE)
3844 goto out;
3845
985e871b
SRRH
3846 if (unlikely(trace_recursive_lock(cpu_buffer)))
3847 goto out;
3848
62f0b3eb 3849 event = rb_reserve_next_event(buffer, cpu_buffer, length);
7a8e76a3 3850 if (!event)
985e871b 3851 goto out_unlock;
7a8e76a3
SR
3852
3853 body = rb_event_data(event);
3854
3855 memcpy(body, data, length);
3856
3857 rb_commit(cpu_buffer, event);
3858
15693458
SRRH
3859 rb_wakeups(buffer, cpu_buffer);
3860
7a8e76a3 3861 ret = 0;
985e871b
SRRH
3862
3863 out_unlock:
3864 trace_recursive_unlock(cpu_buffer);
3865
7a8e76a3 3866 out:
5168ae50 3867 preempt_enable_notrace();
7a8e76a3
SR
3868
3869 return ret;
3870}
c4f50183 3871EXPORT_SYMBOL_GPL(ring_buffer_write);
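/*
 * Editorial sketch (not part of this file): when the payload already
 * exists, ring_buffer_write() replaces the reserve/commit pair above.
 * The sample layout is hypothetical.
 */
static int my_log_sample(struct trace_buffer *buffer)
{
	struct { u64 ts; int cpu; } sample = {
		.ts  = 0,	/* fill in the payload here */
		.cpu = raw_smp_processor_id(),
	};

	/* Length is the payload size; the event header is added internally */
	return ring_buffer_write(buffer, sizeof(sample), &sample);
}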
7a8e76a3 3872
da58834c 3873static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
bf41a158
SR
3874{
3875 struct buffer_page *reader = cpu_buffer->reader_page;
77ae365e 3876 struct buffer_page *head = rb_set_head_page(cpu_buffer);
bf41a158
SR
3877 struct buffer_page *commit = cpu_buffer->commit_page;
3878
77ae365e
SR
3879 /* In case of error, head will be NULL */
3880 if (unlikely(!head))
da58834c 3881 return true;
77ae365e 3882
67f0d6d9
HL
3883 /* Reader should exhaust content in reader page */
3884 if (reader->read != rb_page_commit(reader))
3885 return false;
3886
3887 /*
3888 * If writers are committing on the reader page, knowing all
3889 * committed content has been read, the ring buffer is empty.
3890 */
3891 if (commit == reader)
3892 return true;
3893
3894 /*
3895 * If writers are committing on a page other than reader page
3896 * and head page, there should always be content to read.
3897 */
3898 if (commit != head)
3899 return false;
3900
3901 /*
3902 * Writers are committing on the head page. We just need to
3903 * check whether any data has been committed; the reader will
3904 * swap the reader page with the head page when it reads.
3905 */
3906 return rb_page_commit(commit) == 0;
bf41a158
SR
3907}
3908
7a8e76a3
SR
3909/**
3910 * ring_buffer_record_disable - stop all writes into the buffer
3911 * @buffer: The ring buffer to stop writes to.
3912 *
3913 * This prevents all writes to the buffer. Any attempt to write
3914 * to the buffer after this will fail and return NULL.
3915 *
74401729 3916 * The caller should call synchronize_rcu() after this.
7a8e76a3 3917 */
13292494 3918void ring_buffer_record_disable(struct trace_buffer *buffer)
7a8e76a3
SR
3919{
3920 atomic_inc(&buffer->record_disabled);
3921}
c4f50183 3922EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
7a8e76a3
SR
3923
3924/**
3925 * ring_buffer_record_enable - enable writes to the buffer
3926 * @buffer: The ring buffer to enable writes
3927 *
3928 * Note, multiple disables will need the same number of enables
c41b20e7 3929 * to truly enable the writing (much like preempt_disable).
7a8e76a3 3930 */
13292494 3931void ring_buffer_record_enable(struct trace_buffer *buffer)
7a8e76a3
SR
3932{
3933 atomic_dec(&buffer->record_disabled);
3934}
c4f50183 3935EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
7a8e76a3 3936
499e5470
SR
3937/**
3938 * ring_buffer_record_off - stop all writes into the buffer
3939 * @buffer: The ring buffer to stop writes to.
3940 *
3941 * This prevents all writes to the buffer. Any attempt to write
3942 * to the buffer after this will fail and return NULL.
3943 *
3944 * This is different than ring_buffer_record_disable() as
87abb3b1 3945 * it works like an on/off switch, whereas the disable() version
499e5470
SR
3946 * must be paired with an enable().
3947 */
13292494 3948void ring_buffer_record_off(struct trace_buffer *buffer)
499e5470
SR
3949{
3950 unsigned int rd;
3951 unsigned int new_rd;
3952
3953 do {
3954 rd = atomic_read(&buffer->record_disabled);
3955 new_rd = rd | RB_BUFFER_OFF;
3956 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3957}
3958EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3959
3960/**
3961 * ring_buffer_record_on - restart writes into the buffer
3962 * @buffer: The ring buffer to start writes to.
3963 *
3964 * This enables all writes to the buffer that was disabled by
3965 * ring_buffer_record_off().
3966 *
3967 * This is different than ring_buffer_record_enable() as
87abb3b1 3968 * it works like an on/off switch, whereas the enable() version
499e5470
SR
3969 * must be paired with a disable().
3970 */
13292494 3971void ring_buffer_record_on(struct trace_buffer *buffer)
499e5470
SR
3972{
3973 unsigned int rd;
3974 unsigned int new_rd;
3975
3976 do {
3977 rd = atomic_read(&buffer->record_disabled);
3978 new_rd = rd & ~RB_BUFFER_OFF;
3979 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3980}
3981EXPORT_SYMBOL_GPL(ring_buffer_record_on);
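/*
 * Editorial sketch (not part of this file) contrasting the two
 * mechanisms documented above: disable()/enable() nest like
 * preempt_disable(), while off()/on() act as a single switch.
 */
static void my_pause_and_resume(struct trace_buffer *buffer)
{
	ring_buffer_record_disable(buffer);
	ring_buffer_record_disable(buffer);	/* nested */
	ring_buffer_record_enable(buffer);
	/* still disabled here: one enable per disable is required */
	ring_buffer_record_enable(buffer);	/* writes allowed again */

	ring_buffer_record_off(buffer);
	ring_buffer_record_off(buffer);		/* idempotent */
	ring_buffer_record_on(buffer);		/* writes allowed again */
}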
3982
3983/**
3984 * ring_buffer_record_is_on - return true if the ring buffer can write
3985 * @buffer: The ring buffer to see if write is enabled
3986 *
3987 * Returns true if the ring buffer is in a state that it accepts writes.
3988 */
13292494 3989bool ring_buffer_record_is_on(struct trace_buffer *buffer)
499e5470
SR
3990{
3991 return !atomic_read(&buffer->record_disabled);
3992}
3993
73c8d894
MH
3994/**
3995 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
3996 * @buffer: The ring buffer to see if write is set enabled
3997 *
3998 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
3999 * Note that this does NOT mean it is in a writable state.
4000 *
4001 * It may return true when the ring buffer has been disabled by
4002 * ring_buffer_record_disable(), as that is a temporary disabling of
4003 * the ring buffer.
4004 */
13292494 4005bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
73c8d894
MH
4006{
4007 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
4008}
4009
7a8e76a3
SR
4010/**
4011 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4012 * @buffer: The ring buffer to stop writes to.
4013 * @cpu: The CPU buffer to stop
4014 *
4015 * This prevents all writes to the buffer. Any attempt to write
4016 * to the buffer after this will fail and return NULL.
4017 *
74401729 4018 * The caller should call synchronize_rcu() after this.
7a8e76a3 4019 */
13292494 4020void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
7a8e76a3
SR
4021{
4022 struct ring_buffer_per_cpu *cpu_buffer;
4023
9e01c1b7 4024 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 4025 return;
7a8e76a3
SR
4026
4027 cpu_buffer = buffer->buffers[cpu];
4028 atomic_inc(&cpu_buffer->record_disabled);
4029}
c4f50183 4030EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
7a8e76a3
SR
4031
4032/**
4033 * ring_buffer_record_enable_cpu - enable writes to the buffer
4034 * @buffer: The ring buffer to enable writes
4035 * @cpu: The CPU to enable.
4036 *
4037 * Note, multiple disables will need the same number of enables
c41b20e7 4038 * to truly enable the writing (much like preempt_disable).
7a8e76a3 4039 */
13292494 4040void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
7a8e76a3
SR
4041{
4042 struct ring_buffer_per_cpu *cpu_buffer;
4043
9e01c1b7 4044 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 4045 return;
7a8e76a3
SR
4046
4047 cpu_buffer = buffer->buffers[cpu];
4048 atomic_dec(&cpu_buffer->record_disabled);
4049}
c4f50183 4050EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
7a8e76a3 4051
f6195aa0
SR
4052/*
4053 * The total entries in the ring buffer is the running counter
4054 * of entries entered into the ring buffer, minus the sum of
4055 * the entries read from the ring buffer and the number of
4056 * entries that were overwritten.
4057 */
4058static inline unsigned long
4059rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
4060{
4061 return local_read(&cpu_buffer->entries) -
4062 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
4063}
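/*
 * Editorial example (not in the original source): with 1000 events
 * written (entries), 600 lost to the wrapping writer (overrun) and 150
 * already consumed (read), 1000 - (600 + 150) = 250 entries remain.
 */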
4064
c64e148a
VN
4065/**
4066 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4067 * @buffer: The ring buffer
4068 * @cpu: The per CPU buffer to read from.
4069 */
13292494 4070u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
c64e148a
VN
4071{
4072 unsigned long flags;
4073 struct ring_buffer_per_cpu *cpu_buffer;
4074 struct buffer_page *bpage;
da830e58 4075 u64 ret = 0;
c64e148a
VN
4076
4077 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4078 return 0;
4079
4080 cpu_buffer = buffer->buffers[cpu];
7115e3fc 4081 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
c64e148a
VN
4082 /*
4083 * If the tail is on the reader_page, the oldest time stamp is on
4084 * the reader page.
4085 */
4086 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
4087 bpage = cpu_buffer->reader_page;
4088 else
4089 bpage = rb_set_head_page(cpu_buffer);
54f7be5b
SR
4090 if (bpage)
4091 ret = bpage->page->time_stamp;
7115e3fc 4092 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
c64e148a
VN
4093
4094 return ret;
4095}
4096EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
4097
4098/**
4099 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
4100 * @buffer: The ring buffer
4101 * @cpu: The per CPU buffer to read from.
4102 */
13292494 4103unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
c64e148a
VN
4104{
4105 struct ring_buffer_per_cpu *cpu_buffer;
4106 unsigned long ret;
4107
4108 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4109 return 0;
4110
4111 cpu_buffer = buffer->buffers[cpu];
4112 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
4113
4114 return ret;
4115}
4116EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
4117
7a8e76a3
SR
4118/**
4119 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4120 * @buffer: The ring buffer
4121 * @cpu: The per CPU buffer to get the entries from.
4122 */
13292494 4123unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
7a8e76a3
SR
4124{
4125 struct ring_buffer_per_cpu *cpu_buffer;
4126
9e01c1b7 4127 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 4128 return 0;
7a8e76a3
SR
4129
4130 cpu_buffer = buffer->buffers[cpu];
554f786e 4131
f6195aa0 4132 return rb_num_of_entries(cpu_buffer);
7a8e76a3 4133}
c4f50183 4134EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
7a8e76a3
SR
4135
4136/**
884bfe89
SP
4137 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
4138 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
7a8e76a3
SR
4139 * @buffer: The ring buffer
4140 * @cpu: The per CPU buffer to get the number of overruns from
4141 */
13292494 4142unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
7a8e76a3
SR
4143{
4144 struct ring_buffer_per_cpu *cpu_buffer;
8aabee57 4145 unsigned long ret;
7a8e76a3 4146
9e01c1b7 4147 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 4148 return 0;
7a8e76a3
SR
4149
4150 cpu_buffer = buffer->buffers[cpu];
77ae365e 4151 ret = local_read(&cpu_buffer->overrun);
554f786e
SR
4152
4153 return ret;
7a8e76a3 4154}
c4f50183 4155EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
7a8e76a3 4156
f0d2c681 4157/**
884bfe89
SP
4158 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
4159 * commits failing due to the buffer wrapping around while there are uncommitted
4160 * events, such as during an interrupt storm.
f0d2c681
SR
4161 * @buffer: The ring buffer
4162 * @cpu: The per CPU buffer to get the number of overruns from
4163 */
4164unsigned long
13292494 4165ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
f0d2c681
SR
4166{
4167 struct ring_buffer_per_cpu *cpu_buffer;
4168 unsigned long ret;
4169
4170 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4171 return 0;
4172
4173 cpu_buffer = buffer->buffers[cpu];
77ae365e 4174 ret = local_read(&cpu_buffer->commit_overrun);
f0d2c681
SR
4175
4176 return ret;
4177}
4178EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
4179
884bfe89
SP
4180/**
4181 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
4182 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
4183 * @buffer: The ring buffer
4184 * @cpu: The per CPU buffer to get the number of overruns from
4185 */
4186unsigned long
13292494 4187ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
884bfe89
SP
4188{
4189 struct ring_buffer_per_cpu *cpu_buffer;
4190 unsigned long ret;
4191
4192 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4193 return 0;
4194
4195 cpu_buffer = buffer->buffers[cpu];
4196 ret = local_read(&cpu_buffer->dropped_events);
4197
4198 return ret;
4199}
4200EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
4201
ad964704
SRRH
4202/**
4203 * ring_buffer_read_events_cpu - get the number of events successfully read
4204 * @buffer: The ring buffer
4205 * @cpu: The per CPU buffer to get the number of events read
4206 */
4207unsigned long
13292494 4208ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
ad964704
SRRH
4209{
4210 struct ring_buffer_per_cpu *cpu_buffer;
4211
4212 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4213 return 0;
4214
4215 cpu_buffer = buffer->buffers[cpu];
4216 return cpu_buffer->read;
4217}
4218EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
4219
7a8e76a3
SR
4220/**
4221 * ring_buffer_entries - get the number of entries in a buffer
4222 * @buffer: The ring buffer
4223 *
4224 * Returns the total number of entries in the ring buffer
4225 * (all CPU entries)
4226 */
13292494 4227unsigned long ring_buffer_entries(struct trace_buffer *buffer)
7a8e76a3
SR
4228{
4229 struct ring_buffer_per_cpu *cpu_buffer;
4230 unsigned long entries = 0;
4231 int cpu;
4232
4233 /* if you care about this being correct, lock the buffer */
4234 for_each_buffer_cpu(buffer, cpu) {
4235 cpu_buffer = buffer->buffers[cpu];
f6195aa0 4236 entries += rb_num_of_entries(cpu_buffer);
7a8e76a3
SR
4237 }
4238
4239 return entries;
4240}
c4f50183 4241EXPORT_SYMBOL_GPL(ring_buffer_entries);
7a8e76a3
SR
4242
4243/**
67b394f7 4244 * ring_buffer_overruns - get the number of overruns in buffer
7a8e76a3
SR
4245 * @buffer: The ring buffer
4246 *
4247 * Returns the total number of overruns in the ring buffer
4248 * (all CPU entries)
4249 */
13292494 4250unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
7a8e76a3
SR
4251{
4252 struct ring_buffer_per_cpu *cpu_buffer;
4253 unsigned long overruns = 0;
4254 int cpu;
4255
4256 /* if you care about this being correct, lock the buffer */
4257 for_each_buffer_cpu(buffer, cpu) {
4258 cpu_buffer = buffer->buffers[cpu];
77ae365e 4259 overruns += local_read(&cpu_buffer->overrun);
7a8e76a3
SR
4260 }
4261
4262 return overruns;
4263}
c4f50183 4264EXPORT_SYMBOL_GPL(ring_buffer_overruns);
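/*
 * Editorial sketch (not part of this file): dumping per-cpu statistics
 * with the getters above. The getters return 0 for CPUs that have no
 * buffer, so iterating over all online CPUs is safe.
 */
static void my_dump_stats(struct trace_buffer *buffer)
{
	int cpu;

	for_each_online_cpu(cpu)
		pr_info("cpu%d: entries=%lu overruns=%lu bytes=%lu\n",
			cpu,
			ring_buffer_entries_cpu(buffer, cpu),
			ring_buffer_overrun_cpu(buffer, cpu),
			ring_buffer_bytes_cpu(buffer, cpu));
}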
7a8e76a3 4265
642edba5 4266static void rb_iter_reset(struct ring_buffer_iter *iter)
7a8e76a3
SR
4267{
4268 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4269
d769041f 4270 /* Iterator usage is expected to have record disabled */
651e22f2
SRRH
4271 iter->head_page = cpu_buffer->reader_page;
4272 iter->head = cpu_buffer->reader_page->read;
785888c5 4273 iter->next_event = iter->head;
651e22f2
SRRH
4274
4275 iter->cache_reader_page = iter->head_page;
24607f11 4276 iter->cache_read = cpu_buffer->read;
651e22f2 4277
28e3fc56 4278 if (iter->head) {
d769041f 4279 iter->read_stamp = cpu_buffer->read_stamp;
28e3fc56
SRV
4280 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
4281 } else {
abc9b56d 4282 iter->read_stamp = iter->head_page->page->time_stamp;
28e3fc56
SRV
4283 iter->page_stamp = iter->read_stamp;
4284 }
642edba5 4285}
f83c9d0f 4286
642edba5
SR
4287/**
4288 * ring_buffer_iter_reset - reset an iterator
4289 * @iter: The iterator to reset
4290 *
4291 * Resets the iterator, so that it will start from the beginning
4292 * again.
4293 */
4294void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
4295{
554f786e 4296 struct ring_buffer_per_cpu *cpu_buffer;
642edba5
SR
4297 unsigned long flags;
4298
554f786e
SR
4299 if (!iter)
4300 return;
4301
4302 cpu_buffer = iter->cpu_buffer;
4303
5389f6fa 4304 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
642edba5 4305 rb_iter_reset(iter);
5389f6fa 4306 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7a8e76a3 4307}
c4f50183 4308EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
7a8e76a3
SR
4309
4310/**
4311 * ring_buffer_iter_empty - check if an iterator has no more to read
4312 * @iter: The iterator to check
4313 */
4314int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
4315{
4316 struct ring_buffer_per_cpu *cpu_buffer;
78f7a45d
SRV
4317 struct buffer_page *reader;
4318 struct buffer_page *head_page;
4319 struct buffer_page *commit_page;
ead6ecfd 4320 struct buffer_page *curr_commit_page;
78f7a45d 4321 unsigned commit;
ead6ecfd
SRV
4322 u64 curr_commit_ts;
4323 u64 commit_ts;
7a8e76a3
SR
4324
4325 cpu_buffer = iter->cpu_buffer;
78f7a45d
SRV
4326 reader = cpu_buffer->reader_page;
4327 head_page = cpu_buffer->head_page;
4328 commit_page = cpu_buffer->commit_page;
ead6ecfd
SRV
4329 commit_ts = commit_page->page->time_stamp;
4330
4331 /*
4332 * When the writer goes across pages, it issues a cmpxchg which
4333 * is a mb(), which will synchronize with the rmb here.
4334 * (see rb_tail_page_update())
4335 */
4336 smp_rmb();
78f7a45d 4337 commit = rb_page_commit(commit_page);
ead6ecfd
SRV
4338 /* We want to make sure that the commit page doesn't change */
4339 smp_rmb();
4340
4341 /* Make sure commit page didn't change */
4342 curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
4343 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
4344
4345 /* If the commit page changed, then there's more data */
4346 if (curr_commit_page != commit_page ||
4347 curr_commit_ts != commit_ts)
4348 return 0;
78f7a45d 4349
ead6ecfd 4350 /* Still racy, as it may return a false positive, but that's OK */
785888c5 4351 return ((iter->head_page == commit_page && iter->head >= commit) ||
78f7a45d
SRV
4352 (iter->head_page == reader && commit_page == head_page &&
4353 head_page->read == commit &&
4354 iter->head == rb_page_commit(cpu_buffer->reader_page)));
7a8e76a3 4355}
c4f50183 4356EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
7a8e76a3
SR
4357
4358static void
4359rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
4360 struct ring_buffer_event *event)
4361{
4362 u64 delta;
4363
334d4169 4364 switch (event->type_len) {
7a8e76a3
SR
4365 case RINGBUF_TYPE_PADDING:
4366 return;
4367
4368 case RINGBUF_TYPE_TIME_EXTEND:
e20044f7 4369 delta = rb_event_time_stamp(event);
7a8e76a3
SR
4370 cpu_buffer->read_stamp += delta;
4371 return;
4372
4373 case RINGBUF_TYPE_TIME_STAMP:
e20044f7 4374 delta = rb_event_time_stamp(event);
dc4e2801 4375 cpu_buffer->read_stamp = delta;
7a8e76a3
SR
4376 return;
4377
4378 case RINGBUF_TYPE_DATA:
4379 cpu_buffer->read_stamp += event->time_delta;
4380 return;
4381
4382 default:
da4d401a 4383 RB_WARN_ON(cpu_buffer, 1);
7a8e76a3
SR
4384 }
4385 return;
4386}
4387
4388static void
4389rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
4390 struct ring_buffer_event *event)
4391{
4392 u64 delta;
4393
334d4169 4394 switch (event->type_len) {
7a8e76a3
SR
4395 case RINGBUF_TYPE_PADDING:
4396 return;
4397
4398 case RINGBUF_TYPE_TIME_EXTEND:
e20044f7 4399 delta = rb_event_time_stamp(event);
7a8e76a3
SR
4400 iter->read_stamp += delta;
4401 return;
4402
4403 case RINGBUF_TYPE_TIME_STAMP:
e20044f7 4404 delta = rb_event_time_stamp(event);
dc4e2801 4405 iter->read_stamp = delta;
7a8e76a3
SR
4406 return;
4407
4408 case RINGBUF_TYPE_DATA:
4409 iter->read_stamp += event->time_delta;
4410 return;
4411
4412 default:
da4d401a 4413 RB_WARN_ON(iter->cpu_buffer, 1);
7a8e76a3
SR
4414 }
4415 return;
4416}
4417
d769041f
SR
4418static struct buffer_page *
4419rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 4420{
d769041f 4421 struct buffer_page *reader = NULL;
66a8cb95 4422 unsigned long overwrite;
d769041f 4423 unsigned long flags;
818e3dd3 4424 int nr_loops = 0;
77ae365e 4425 int ret;
d769041f 4426
3e03fb7f 4427 local_irq_save(flags);
0199c4e6 4428 arch_spin_lock(&cpu_buffer->lock);
d769041f
SR
4429
4430 again:
818e3dd3
SR
4431 /*
4432 * This should normally only loop twice. But because the
4433 * start of the reader inserts an empty page, it causes
4434 * a case where we will loop three times. There should be no
4435 * reason to loop four times (that I know of).
4436 */
3e89c7bb 4437 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
818e3dd3
SR
4438 reader = NULL;
4439 goto out;
4440 }
4441
d769041f
SR
4442 reader = cpu_buffer->reader_page;
4443
4444 /* If there's more to read, return this page */
bf41a158 4445 if (cpu_buffer->reader_page->read < rb_page_size(reader))
d769041f
SR
4446 goto out;
4447
4448 /* Never should we have an index greater than the size */
3e89c7bb
SR
4449 if (RB_WARN_ON(cpu_buffer,
4450 cpu_buffer->reader_page->read > rb_page_size(reader)))
4451 goto out;
d769041f
SR
4452
4453 /* check if we caught up to the tail */
4454 reader = NULL;
bf41a158 4455 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
d769041f 4456 goto out;
7a8e76a3 4457
a5fb8331
SR
4458 /* Don't bother swapping if the ring buffer is empty */
4459 if (rb_num_of_entries(cpu_buffer) == 0)
4460 goto out;
4461
7a8e76a3 4462 /*
d769041f 4463 * Reset the reader page to size zero.
7a8e76a3 4464 */
77ae365e
SR
4465 local_set(&cpu_buffer->reader_page->write, 0);
4466 local_set(&cpu_buffer->reader_page->entries, 0);
4467 local_set(&cpu_buffer->reader_page->page->commit, 0);
ff0ff84a 4468 cpu_buffer->reader_page->real_end = 0;
7a8e76a3 4469
77ae365e
SR
4470 spin:
4471 /*
4472 * Splice the empty reader page into the list around the head.
4473 */
4474 reader = rb_set_head_page(cpu_buffer);
54f7be5b
SR
4475 if (!reader)
4476 goto out;
0e1ff5d7 4477 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
d769041f 4478 cpu_buffer->reader_page->list.prev = reader->list.prev;
bf41a158 4479
3adc54fa
SR
4480 /*
4481 * cpu_buffer->pages just needs to point to the buffer, it
4482 * has no specific buffer page to point to. Lets move it out
25985edc 4483 * of our way so we don't accidentally swap it.
3adc54fa
SR
4484 */
4485 cpu_buffer->pages = reader->list.prev;
4486
77ae365e 4487 /* The reader page will be pointing to the new head */
6689bed3 4488 rb_set_list_to_head(&cpu_buffer->reader_page->list);
7a8e76a3 4489
66a8cb95
SR
4490 /*
4491 * We want to make sure we read the overruns after we set up our
4492 * pointers to the next object. The writer side does a
4493 * cmpxchg to cross pages which acts as the mb on the writer
4494 * side. Note, the reader will constantly fail the swap
4495 * while the writer is updating the pointers, so this
4496 * guarantees that the overwrite recorded here is the one we
4497 * want to compare with the last_overrun.
4498 */
4499 smp_mb();
4500 overwrite = local_read(&(cpu_buffer->overrun));
4501
77ae365e
SR
4502 /*
4503 * Here's the tricky part.
4504 *
4505 * We need to move the pointer past the header page.
4506 * But we can only do that if a writer is not currently
4507 * moving it. The page before the header page has the
4508 * flag bit '1' set if it is pointing to the page we want,
4509 * but if the writer is in the process of moving it
4510 * then it will be '2', or '0' if it has already moved.
4511 */
4512
4513 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
7a8e76a3
SR
4514
4515 /*
77ae365e 4516 * If we did not convert it, then we must try again.
7a8e76a3 4517 */
77ae365e
SR
4518 if (!ret)
4519 goto spin;
7a8e76a3 4520
77ae365e 4521 /*
2c2b0a78 4522 * Yay! We succeeded in replacing the page.
77ae365e
SR
4523 *
4524 * Now make the new head point back to the reader page.
4525 */
5ded3dc6 4526 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
6689bed3 4527 rb_inc_page(&cpu_buffer->head_page);
d769041f 4528
2c2b0a78
SRV
4529 local_inc(&cpu_buffer->pages_read);
4530
d769041f
SR
4531 /* Finally update the reader page to the new head */
4532 cpu_buffer->reader_page = reader;
b81f472a 4533 cpu_buffer->reader_page->read = 0;
d769041f 4534
66a8cb95
SR
4535 if (overwrite != cpu_buffer->last_overrun) {
4536 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
4537 cpu_buffer->last_overrun = overwrite;
4538 }
4539
d769041f
SR
4540 goto again;
4541
4542 out:
b81f472a
SRRH
4543 /* Update the read_stamp on the first event */
4544 if (reader && reader->read == 0)
4545 cpu_buffer->read_stamp = reader->page->time_stamp;
4546
0199c4e6 4547 arch_spin_unlock(&cpu_buffer->lock);
3e03fb7f 4548 local_irq_restore(flags);
d769041f
SR
4549
4550 return reader;
4551}
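/*
 * Editorial summary of rb_get_reader_page() (not in the original
 * source): the reader empties its private page, splices it into the
 * ring around the current head, then uses rb_head_page_replace() (a
 * cmpxchg on the flag bits of the prev->next pointer) to atomically
 * steal the old head page as the new reader page. If a writer moved
 * the head concurrently, the cmpxchg fails and the reader retries at
 * the "spin" label.
 */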
4552
4553static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
4554{
4555 struct ring_buffer_event *event;
4556 struct buffer_page *reader;
4557 unsigned length;
4558
4559 reader = rb_get_reader_page(cpu_buffer);
7a8e76a3 4560
d769041f 4561 /* This function should not be called when buffer is empty */
3e89c7bb
SR
4562 if (RB_WARN_ON(cpu_buffer, !reader))
4563 return;
7a8e76a3 4564
d769041f
SR
4565 event = rb_reader_event(cpu_buffer);
4566
a1863c21 4567 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
e4906eff 4568 cpu_buffer->read++;
d769041f
SR
4569
4570 rb_update_read_stamp(cpu_buffer, event);
4571
4572 length = rb_event_length(event);
6f807acd 4573 cpu_buffer->reader_page->read += length;
7a8e76a3
SR
4574}
4575
4576static void rb_advance_iter(struct ring_buffer_iter *iter)
4577{
7a8e76a3 4578 struct ring_buffer_per_cpu *cpu_buffer;
7a8e76a3
SR
4579
4580 cpu_buffer = iter->cpu_buffer;
7a8e76a3 4581
785888c5
SRV
4582 /* If head == next_event then we need to jump to the next event */
4583 if (iter->head == iter->next_event) {
4584 /* If the event gets overwritten again, there's nothing to do */
4585 if (rb_iter_head_event(iter) == NULL)
4586 return;
4587 }
4588
4589 iter->head = iter->next_event;
4590
7a8e76a3
SR
4591 /*
4592 * Check if we are at the end of the buffer.
4593 */
785888c5 4594 if (iter->next_event >= rb_page_size(iter->head_page)) {
ea05b57c
SR
4595 /* discarded commits can make the page empty */
4596 if (iter->head_page == cpu_buffer->commit_page)
3e89c7bb 4597 return;
d769041f 4598 rb_inc_iter(iter);
7a8e76a3
SR
4599 return;
4600 }
4601
785888c5 4602 rb_update_iter_read_stamp(iter, iter->event);
7a8e76a3
SR
4603}
4604
66a8cb95
SR
4605static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
4606{
4607 return cpu_buffer->lost_events;
4608}
4609
f83c9d0f 4610static struct ring_buffer_event *
66a8cb95
SR
4611rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
4612 unsigned long *lost_events)
7a8e76a3 4613{
7a8e76a3 4614 struct ring_buffer_event *event;
d769041f 4615 struct buffer_page *reader;
818e3dd3 4616 int nr_loops = 0;
7a8e76a3 4617
dc4e2801
TZ
4618 if (ts)
4619 *ts = 0;
7a8e76a3 4620 again:
818e3dd3 4621 /*
69d1b839
SR
4622 * We repeat when a time extend is encountered.
4623 * Since the time extend is always attached to a data event,
4624 * we should never loop more than once.
4625 * (We never hit the following condition more than twice).
818e3dd3 4626 */
69d1b839 4627 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
818e3dd3 4628 return NULL;
818e3dd3 4629
d769041f
SR
4630 reader = rb_get_reader_page(cpu_buffer);
4631 if (!reader)
7a8e76a3
SR
4632 return NULL;
4633
d769041f 4634 event = rb_reader_event(cpu_buffer);
7a8e76a3 4635
334d4169 4636 switch (event->type_len) {
7a8e76a3 4637 case RINGBUF_TYPE_PADDING:
2d622719
TZ
4638 if (rb_null_event(event))
4639 RB_WARN_ON(cpu_buffer, 1);
4640 /*
4641 * Because the writer could be discarding every
4642 * event it creates (which would probably be bad),
4643 * if we were to go back to "again" then we may never
4644 * catch up, and will trigger the warn on, or lock
4645 * the box. Return the padding, and we will release
4646 * the current locks, and try again.
4647 */
2d622719 4648 return event;
7a8e76a3
SR
4649
4650 case RINGBUF_TYPE_TIME_EXTEND:
4651 /* Internal data, OK to advance */
d769041f 4652 rb_advance_reader(cpu_buffer);
7a8e76a3
SR
4653 goto again;
4654
4655 case RINGBUF_TYPE_TIME_STAMP:
dc4e2801 4656 if (ts) {
e20044f7 4657 *ts = rb_event_time_stamp(event);
dc4e2801
TZ
4658 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4659 cpu_buffer->cpu, ts);
4660 }
4661 /* Internal data, OK to advance */
d769041f 4662 rb_advance_reader(cpu_buffer);
7a8e76a3
SR
4663 goto again;
4664
4665 case RINGBUF_TYPE_DATA:
dc4e2801 4666 if (ts && !(*ts)) {
7a8e76a3 4667 *ts = cpu_buffer->read_stamp + event->time_delta;
d8eeb2d3 4668 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
37886f6a 4669 cpu_buffer->cpu, ts);
7a8e76a3 4670 }
66a8cb95
SR
4671 if (lost_events)
4672 *lost_events = rb_lost_events(cpu_buffer);
7a8e76a3
SR
4673 return event;
4674
4675 default:
da4d401a 4676 RB_WARN_ON(cpu_buffer, 1);
7a8e76a3
SR
4677 }
4678
4679 return NULL;
4680}
c4f50183 4681EXPORT_SYMBOL_GPL(ring_buffer_peek);
7a8e76a3 4682
f83c9d0f
SR
4683static struct ring_buffer_event *
4684rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
7a8e76a3 4685{
13292494 4686 struct trace_buffer *buffer;
7a8e76a3
SR
4687 struct ring_buffer_per_cpu *cpu_buffer;
4688 struct ring_buffer_event *event;
818e3dd3 4689 int nr_loops = 0;
7a8e76a3 4690
dc4e2801
TZ
4691 if (ts)
4692 *ts = 0;
4693
7a8e76a3
SR
4694 cpu_buffer = iter->cpu_buffer;
4695 buffer = cpu_buffer->buffer;
4696
492a74f4
SR
4697 /*
4698 * Check if someone performed a consuming read to
4699 * the buffer. A consuming read invalidates the iterator
4700 * and we need to reset the iterator in this case.
4701 */
4702 if (unlikely(iter->cache_read != cpu_buffer->read ||
4703 iter->cache_reader_page != cpu_buffer->reader_page))
4704 rb_iter_reset(iter);
4705
7a8e76a3 4706 again:
3c05d748
SR
4707 if (ring_buffer_iter_empty(iter))
4708 return NULL;
4709
818e3dd3 4710 /*
3d2353de
SRV
4711 * As the writer can mess with what the iterator is trying
4712 * to read, just give up if we fail to get an event after
4713 * three tries. The iterator is not as reliable when reading
4714 * the ring buffer with an active write as the consumer is.
4715 * Do not warn when the limit of three failures is reached.
818e3dd3 4716 */
3d2353de 4717 if (++nr_loops > 3)
818e3dd3 4718 return NULL;
818e3dd3 4719
7a8e76a3
SR
4720 if (rb_per_cpu_empty(cpu_buffer))
4721 return NULL;
4722
10e83fd0 4723 if (iter->head >= rb_page_size(iter->head_page)) {
3c05d748
SR
4724 rb_inc_iter(iter);
4725 goto again;
4726 }
4727
7a8e76a3 4728 event = rb_iter_head_event(iter);
3d2353de 4729 if (!event)
785888c5 4730 goto again;
7a8e76a3 4731
334d4169 4732 switch (event->type_len) {
7a8e76a3 4733 case RINGBUF_TYPE_PADDING:
2d622719
TZ
4734 if (rb_null_event(event)) {
4735 rb_inc_iter(iter);
4736 goto again;
4737 }
4738 rb_advance_iter(iter);
4739 return event;
7a8e76a3
SR
4740
4741 case RINGBUF_TYPE_TIME_EXTEND:
4742 /* Internal data, OK to advance */
4743 rb_advance_iter(iter);
4744 goto again;
4745
4746 case RINGBUF_TYPE_TIME_STAMP:
dc4e2801 4747 if (ts) {
e20044f7 4748 *ts = rb_event_time_stamp(event);
dc4e2801
TZ
4749 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4750 cpu_buffer->cpu, ts);
4751 }
4752 /* Internal data, OK to advance */
7a8e76a3
SR
4753 rb_advance_iter(iter);
4754 goto again;
4755
4756 case RINGBUF_TYPE_DATA:
dc4e2801 4757 if (ts && !(*ts)) {
7a8e76a3 4758 *ts = iter->read_stamp + event->time_delta;
37886f6a
SR
4759 ring_buffer_normalize_time_stamp(buffer,
4760 cpu_buffer->cpu, ts);
7a8e76a3
SR
4761 }
4762 return event;
4763
4764 default:
da4d401a 4765 RB_WARN_ON(cpu_buffer, 1);
7a8e76a3
SR
4766 }
4767
4768 return NULL;
4769}
c4f50183 4770EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
7a8e76a3 4771
289a5a25 4772static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
8d707e8e 4773{
289a5a25
SRRH
4774 if (likely(!in_nmi())) {
4775 raw_spin_lock(&cpu_buffer->reader_lock);
4776 return true;
4777 }
4778
8d707e8e
SR
4779 /*
4780 * If an NMI die dump is reading out the content of the ring buffer,
289a5a25
SRRH
4781 * trylock must be used to prevent a deadlock if the NMI
4782 * preempted a task that holds the ring buffer locks. If
4783 * we get the lock then all is fine, if not, then continue
4784 * to do the read, but this can corrupt the ring buffer,
4785 * so it must be permanently disabled from future writes.
4786 * Reading from NMI is a one-shot deal.
8d707e8e 4787 */
289a5a25
SRRH
4788 if (raw_spin_trylock(&cpu_buffer->reader_lock))
4789 return true;
8d707e8e 4790
289a5a25
SRRH
4791 /* Continue without locking, but disable the ring buffer */
4792 atomic_inc(&cpu_buffer->record_disabled);
4793 return false;
4794}
4795
4796static inline void
4797rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
4798{
4799 if (likely(locked))
4800 raw_spin_unlock(&cpu_buffer->reader_lock);
4801 return;
8d707e8e
SR
4802}
4803
f83c9d0f
SR
4804/**
4805 * ring_buffer_peek - peek at the next event to be read
4806 * @buffer: The ring buffer to read
4807 * @cpu: The cpu to peek at
4808 * @ts: The timestamp counter of this event.
66a8cb95 4809 * @lost_events: a variable to store if events were lost (may be NULL)
f83c9d0f
SR
4810 *
4811 * This will return the event that will be read next, but does
4812 * not consume the data.
4813 */
4814struct ring_buffer_event *
13292494 4815ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
66a8cb95 4816 unsigned long *lost_events)
f83c9d0f
SR
4817{
4818 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
8aabee57 4819 struct ring_buffer_event *event;
f83c9d0f 4820 unsigned long flags;
289a5a25 4821 bool dolock;
f83c9d0f 4822
554f786e 4823 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 4824 return NULL;
554f786e 4825
2d622719 4826 again:
8d707e8e 4827 local_irq_save(flags);
289a5a25 4828 dolock = rb_reader_lock(cpu_buffer);
66a8cb95 4829 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
469535a5
RR
4830 if (event && event->type_len == RINGBUF_TYPE_PADDING)
4831 rb_advance_reader(cpu_buffer);
289a5a25 4832 rb_reader_unlock(cpu_buffer, dolock);
8d707e8e 4833 local_irq_restore(flags);
f83c9d0f 4834
1b959e18 4835 if (event && event->type_len == RINGBUF_TYPE_PADDING)
2d622719 4836 goto again;
2d622719 4837
f83c9d0f
SR
4838 return event;
4839}
4840
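/*
 * Editorial sketch (not part of this file): a non-destructive peek
 * followed by a consuming read of the same event.
 */
static void my_peek_then_consume(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;

	event = ring_buffer_peek(buffer, cpu, &ts, &lost);
	if (!event)
		return;		/* buffer empty */

	/* The same event is still there; this call consumes it */
	event = ring_buffer_consume(buffer, cpu, &ts, &lost);
}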
c9b7a4a7
SRV
4841/**
 * ring_buffer_iter_dropped - report if there are dropped events
4842 * @iter: The ring buffer iterator
4843 *
4844 * Returns true if there were dropped events since the last peek.
4845 */
4846bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
4847{
4848 bool ret = iter->missed_events != 0;
4849
4850 iter->missed_events = 0;
4851 return ret;
4852}
4853EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
4854
f83c9d0f
SR
4855/**
4856 * ring_buffer_iter_peek - peek at the next event to be read
4857 * @iter: The ring buffer iterator
4858 * @ts: The timestamp counter of this event.
4859 *
4860 * This will return the event that will be read next, but does
4861 * not increment the iterator.
4862 */
4863struct ring_buffer_event *
4864ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4865{
4866 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4867 struct ring_buffer_event *event;
4868 unsigned long flags;
4869
2d622719 4870 again:
5389f6fa 4871 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
f83c9d0f 4872 event = rb_iter_peek(iter, ts);
5389f6fa 4873 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
f83c9d0f 4874
1b959e18 4875 if (event && event->type_len == RINGBUF_TYPE_PADDING)
2d622719 4876 goto again;
2d622719 4877
f83c9d0f
SR
4878 return event;
4879}
4880
7a8e76a3
SR
4881/**
4882 * ring_buffer_consume - return an event and consume it
4883 * @buffer: The ring buffer to get the next event from
66a8cb95
SR
4884 * @cpu: the cpu to read the buffer from
4885 * @ts: a variable to store the timestamp (may be NULL)
4886 * @lost_events: a variable to store if events were lost (may be NULL)
7a8e76a3
SR
4887 *
4888 * Returns the next event in the ring buffer, and that event is consumed.
4889 * Meaning that sequential reads will keep returning a different event,
4890 * and eventually empty the ring buffer if the producer is slower.
4891 */
4892struct ring_buffer_event *
13292494 4893ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
66a8cb95 4894 unsigned long *lost_events)
7a8e76a3 4895{
554f786e
SR
4896 struct ring_buffer_per_cpu *cpu_buffer;
4897 struct ring_buffer_event *event = NULL;
f83c9d0f 4898 unsigned long flags;
289a5a25 4899 bool dolock;
7a8e76a3 4900
2d622719 4901 again:
554f786e
SR
4902 /* might be called in atomic */
4903 preempt_disable();
4904
9e01c1b7 4905 if (!cpumask_test_cpu(cpu, buffer->cpumask))
554f786e 4906 goto out;
7a8e76a3 4907
554f786e 4908 cpu_buffer = buffer->buffers[cpu];
8d707e8e 4909 local_irq_save(flags);
289a5a25 4910 dolock = rb_reader_lock(cpu_buffer);
f83c9d0f 4911
66a8cb95
SR
4912 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4913 if (event) {
4914 cpu_buffer->lost_events = 0;
469535a5 4915 rb_advance_reader(cpu_buffer);
66a8cb95 4916 }
7a8e76a3 4917
289a5a25 4918 rb_reader_unlock(cpu_buffer, dolock);
8d707e8e 4919 local_irq_restore(flags);
f83c9d0f 4920
554f786e
SR
4921 out:
4922 preempt_enable();
4923
1b959e18 4924 if (event && event->type_len == RINGBUF_TYPE_PADDING)
2d622719 4925 goto again;
2d622719 4926
7a8e76a3
SR
4927 return event;
4928}
c4f50183 4929EXPORT_SYMBOL_GPL(ring_buffer_consume);
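/*
 * Editorial sketch (not part of this file): draining one cpu buffer
 * with consuming reads, as described above. The handler callback is
 * hypothetical.
 */
static void my_drain_cpu(struct trace_buffer *buffer, int cpu,
			 void (*handle)(void *data, u64 ts))
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		if (lost)
			pr_warn("cpu%d: lost %lu events\n", cpu, lost);
		handle(ring_buffer_event_data(event), ts);
	}
}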
7a8e76a3
SR
4930
4931/**
72c9ddfd 4932 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
7a8e76a3
SR
4933 * @buffer: The ring buffer to read from
4934 * @cpu: The cpu buffer to iterate over
31b265b3 4935 * @flags: gfp flags to use for memory allocation
7a8e76a3 4936 *
72c9ddfd
DM
4937 * This performs the initial preparations necessary to iterate
4938 * through the buffer. Memory is allocated, buffer recording
4939 * is disabled, and the iterator pointer is returned to the caller.
7a8e76a3 4940 *
6167c205 4941 * Disabling buffer recording prevents the reading from being
72c9ddfd
DM
4942 * corrupted. This is not a consuming read, so a producer is not
4943 * expected.
4944 *
4945 * After a sequence of ring_buffer_read_prepare calls, the user is
d611851b 4946 * expected to make at least one call to ring_buffer_read_prepare_sync.
72c9ddfd
DM
4947 * Afterwards, ring_buffer_read_start is invoked to get things going
4948 * for real.
4949 *
d611851b 4950 * This overall must be paired with ring_buffer_read_finish.
7a8e76a3
SR
4951 */
4952struct ring_buffer_iter *
13292494 4953ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
7a8e76a3
SR
4954{
4955 struct ring_buffer_per_cpu *cpu_buffer;
8aabee57 4956 struct ring_buffer_iter *iter;
7a8e76a3 4957
9e01c1b7 4958 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 4959 return NULL;
7a8e76a3 4960
785888c5 4961 iter = kzalloc(sizeof(*iter), flags);
7a8e76a3 4962 if (!iter)
8aabee57 4963 return NULL;
7a8e76a3 4964
785888c5
SRV
4965 iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags);
4966 if (!iter->event) {
4967 kfree(iter);
4968 return NULL;
4969 }
4970
7a8e76a3
SR
4971 cpu_buffer = buffer->buffers[cpu];
4972
4973 iter->cpu_buffer = cpu_buffer;
4974
07b8b10e 4975 atomic_inc(&cpu_buffer->resize_disabled);
72c9ddfd
DM
4976
4977 return iter;
4978}
4979EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
4980
4981/**
4982 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
4983 *
4984 * All previously invoked ring_buffer_read_prepare calls to prepare
4985 * iterators will be synchronized. Afterwards, ring_buffer_read_start
4986 * calls on those iterators are allowed.
4987 */
4988void
4989ring_buffer_read_prepare_sync(void)
4990{
74401729 4991 synchronize_rcu();
72c9ddfd
DM
4992}
4993EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4994
4995/**
4996 * ring_buffer_read_start - start a non consuming read of the buffer
4997 * @iter: The iterator returned by ring_buffer_read_prepare
4998 *
4999 * This finalizes the startup of an iteration through the buffer.
5000 * The iterator comes from a call to ring_buffer_read_prepare and
5001 * an intervening ring_buffer_read_prepare_sync must have been
5002 * performed.
5003 *
d611851b 5004 * Must be paired with ring_buffer_read_finish.
72c9ddfd
DM
5005 */
5006void
5007ring_buffer_read_start(struct ring_buffer_iter *iter)
5008{
5009 struct ring_buffer_per_cpu *cpu_buffer;
5010 unsigned long flags;
5011
5012 if (!iter)
5013 return;
5014
5015 cpu_buffer = iter->cpu_buffer;
7a8e76a3 5016
5389f6fa 5017 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
0199c4e6 5018 arch_spin_lock(&cpu_buffer->lock);
642edba5 5019 rb_iter_reset(iter);
0199c4e6 5020 arch_spin_unlock(&cpu_buffer->lock);
5389f6fa 5021 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7a8e76a3 5022}
c4f50183 5023EXPORT_SYMBOL_GPL(ring_buffer_read_start);
7a8e76a3
SR
5024
5025/**
d611851b 5026 * ring_buffer_read_finish - finish reading the iterator of the buffer
7a8e76a3
SR
5027 * @iter: The iterator retrieved by ring_buffer_read_prepare
5028 *
5029 * This re-enables the recording to the buffer, and frees the
5030 * iterator.
5031 */
5032void
5033ring_buffer_read_finish(struct ring_buffer_iter *iter)
5034{
5035 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
9366c1ba 5036 unsigned long flags;
7a8e76a3 5037
659f451f
SR
5038 /*
5039 * Ring buffer is disabled from recording, here's a good place
9366c1ba
SR
5040 * to check the integrity of the ring buffer.
5041 * Must prevent readers from trying to read, as the check
5042 * clears the HEAD page and readers require it.
659f451f 5043 */
9366c1ba 5044 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
659f451f 5045 rb_check_pages(cpu_buffer);
9366c1ba 5046 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
659f451f 5047
07b8b10e 5048 atomic_dec(&cpu_buffer->resize_disabled);
785888c5 5049 kfree(iter->event);
7a8e76a3
SR
5050 kfree(iter);
5051}
c4f50183 5052EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
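/*
 * Editorial sketch (not part of this file) of the full non-consuming
 * read protocol spelled out in the kernel-doc above:
 * prepare -> sync -> start -> iterate -> finish.
 */
static void my_iterate_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
	if (!iter)
		return;
	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_iter_peek(iter, &ts))) {
		/* inspect the event here, then move past it */
		ring_buffer_iter_advance(iter);
	}

	ring_buffer_read_finish(iter);
}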
7a8e76a3
SR
5053
5054/**
bc1a72af 5055 * ring_buffer_iter_advance - advance the iterator to the next location
7a8e76a3 5056 * @iter: The ring buffer iterator
7a8e76a3 5057 *
bc1a72af
SRV
5058 * Move the location of the iterator such that the next read will
5059 * be the next location of the iterator.
7a8e76a3 5060 */
bc1a72af 5061void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
7a8e76a3 5062{
f83c9d0f
SR
5063 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5064 unsigned long flags;
7a8e76a3 5065
5389f6fa 5066 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7e9391cf 5067
7a8e76a3
SR
5068 rb_advance_iter(iter);
5069
bc1a72af 5070 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7a8e76a3 5071}
bc1a72af 5072EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
7a8e76a3
SR
5073
5074/**
5075 * ring_buffer_size - return the size of the ring buffer (in bytes)
5076 * @buffer: The ring buffer.
59e7cffe 5077 * @cpu: The CPU to get ring buffer size from.
7a8e76a3 5078 */
13292494 5079unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
7a8e76a3 5080{
438ced17
VN
5081 /*
5082 * Earlier, this method returned
5083 * BUF_PAGE_SIZE * buffer->nr_pages
5084 * Since the nr_pages field is now removed, we have converted this to
5085 * return the per cpu buffer value.
5086 */
5087 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5088 return 0;
5089
5090 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
7a8e76a3 5091}
c4f50183 5092EXPORT_SYMBOL_GPL(ring_buffer_size);
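/*
 * Illustrative sketch (not part of this file): because ring_buffer_size()
 * is now per CPU, a caller that wants the old whole-buffer figure has to
 * sum the CPU buffers itself.  example_total_size() is a hypothetical
 * helper showing that.
 */
static unsigned long example_total_size(struct trace_buffer *buffer)
{
	unsigned long total = 0;
	int cpu;

	/* CPUs without a buffer contribute 0 */
	for_each_online_cpu(cpu)
		total += ring_buffer_size(buffer, cpu);

	return total;
}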
7a8e76a3
SR
5093
5094static void
5095rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
5096{
77ae365e
SR
5097 rb_head_page_deactivate(cpu_buffer);
5098
7a8e76a3 5099 cpu_buffer->head_page
3adc54fa 5100 = list_entry(cpu_buffer->pages, struct buffer_page, list);
bf41a158 5101 local_set(&cpu_buffer->head_page->write, 0);
778c55d4 5102 local_set(&cpu_buffer->head_page->entries, 0);
abc9b56d 5103 local_set(&cpu_buffer->head_page->page->commit, 0);
d769041f 5104
6f807acd 5105 cpu_buffer->head_page->read = 0;
bf41a158
SR
5106
5107 cpu_buffer->tail_page = cpu_buffer->head_page;
5108 cpu_buffer->commit_page = cpu_buffer->head_page;
5109
5110 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
5040b4b7 5111 INIT_LIST_HEAD(&cpu_buffer->new_pages);
bf41a158 5112 local_set(&cpu_buffer->reader_page->write, 0);
778c55d4 5113 local_set(&cpu_buffer->reader_page->entries, 0);
abc9b56d 5114 local_set(&cpu_buffer->reader_page->page->commit, 0);
6f807acd 5115 cpu_buffer->reader_page->read = 0;
7a8e76a3 5116
c64e148a 5117 local_set(&cpu_buffer->entries_bytes, 0);
77ae365e 5118 local_set(&cpu_buffer->overrun, 0);
884bfe89
SP
5119 local_set(&cpu_buffer->commit_overrun, 0);
5120 local_set(&cpu_buffer->dropped_events, 0);
e4906eff 5121 local_set(&cpu_buffer->entries, 0);
fa743953
SR
5122 local_set(&cpu_buffer->committing, 0);
5123 local_set(&cpu_buffer->commits, 0);
2c2b0a78
SRV
5124 local_set(&cpu_buffer->pages_touched, 0);
5125 local_set(&cpu_buffer->pages_read, 0);
03329f99 5126 cpu_buffer->last_pages_touch = 0;
2c2b0a78 5127 cpu_buffer->shortest_full = 0;
77ae365e 5128 cpu_buffer->read = 0;
c64e148a 5129 cpu_buffer->read_bytes = 0;
69507c06 5130
10464b4a
SRV
5131 rb_time_set(&cpu_buffer->write_stamp, 0);
5132 rb_time_set(&cpu_buffer->before_stamp, 0);
77ae365e 5133
8672e494
SRV
5134 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp));
5135
66a8cb95
SR
5136 cpu_buffer->lost_events = 0;
5137 cpu_buffer->last_overrun = 0;
5138
77ae365e 5139 rb_head_page_activate(cpu_buffer);
7a8e76a3
SR
5140}
5141
b23d7a5f
NP
5142/* Must have disabled the cpu buffer then done a synchronize_rcu */
5143static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
5144{
5145 unsigned long flags;
5146
5147 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5148
5149 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
5150 goto out;
5151
5152 arch_spin_lock(&cpu_buffer->lock);
5153
5154 rb_reset_cpu(cpu_buffer);
5155
5156 arch_spin_unlock(&cpu_buffer->lock);
5157
5158 out:
5159 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5160}
5161
7a8e76a3
SR
5162/**
5163 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5164 * @buffer: The ring buffer to reset a per cpu buffer of
5165 * @cpu: The CPU buffer to be reset
5166 */
13292494 5167void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
7a8e76a3
SR
5168{
5169 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
7a8e76a3 5170
9e01c1b7 5171 if (!cpumask_test_cpu(cpu, buffer->cpumask))
8aabee57 5172 return;
7a8e76a3 5173
bbeb9746
GK
5174 /* prevent another thread from changing buffer sizes */
5175 mutex_lock(&buffer->mutex);
5176
07b8b10e 5177 atomic_inc(&cpu_buffer->resize_disabled);
41ede23e
SR
5178 atomic_inc(&cpu_buffer->record_disabled);
5179
83f40318 5180 /* Make sure all commits have finished */
74401729 5181 synchronize_rcu();
83f40318 5182
b23d7a5f 5183 reset_disabled_cpu_buffer(cpu_buffer);
f83c9d0f 5184
b23d7a5f
NP
5185 atomic_dec(&cpu_buffer->record_disabled);
5186 atomic_dec(&cpu_buffer->resize_disabled);
bbeb9746
GK
5187
5188 mutex_unlock(&buffer->mutex);
b23d7a5f
NP
5189}
5190EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
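/*
 * Illustrative sketch (not part of this file): one way a caller might
 * clear a single CPU's trace data.  ring_buffer_reset_cpu() already
 * disables recording internally; the explicit disable/enable pair here
 * only widens the quiet window for the caller.  Names are hypothetical.
 */
static void example_clear_cpu(struct trace_buffer *buffer, int cpu)
{
	ring_buffer_record_disable_cpu(buffer, cpu);
	ring_buffer_reset_cpu(buffer, cpu);
	ring_buffer_record_enable_cpu(buffer, cpu);
}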
41b6a95d 5191
b23d7a5f
NP
5192/**
5193 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5194 * @buffer: The ring buffer to reset a per cpu buffer of
5195 * @cpu: The CPU buffer to be reset
5196 */
5197void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
5198{
5199 struct ring_buffer_per_cpu *cpu_buffer;
5200 int cpu;
7a8e76a3 5201
bbeb9746
GK
5202 /* prevent another thread from changing buffer sizes */
5203 mutex_lock(&buffer->mutex);
5204
b23d7a5f
NP
5205 for_each_online_buffer_cpu(buffer, cpu) {
5206 cpu_buffer = buffer->buffers[cpu];
7a8e76a3 5207
b23d7a5f
NP
5208 atomic_inc(&cpu_buffer->resize_disabled);
5209 atomic_inc(&cpu_buffer->record_disabled);
5210 }
f83c9d0f 5211
b23d7a5f
NP
5212 /* Make sure all commits have finished */
5213 synchronize_rcu();
41ede23e 5214
b23d7a5f
NP
5215 for_each_online_buffer_cpu(buffer, cpu) {
5216 cpu_buffer = buffer->buffers[cpu];
5217
5218 reset_disabled_cpu_buffer(cpu_buffer);
5219
5220 atomic_dec(&cpu_buffer->record_disabled);
5221 atomic_dec(&cpu_buffer->resize_disabled);
5222 }
bbeb9746
GK
5223
5224 mutex_unlock(&buffer->mutex);
7a8e76a3
SR
5225}
5226
5227/**
5228 * ring_buffer_reset - reset a ring buffer
5229 * @buffer: The ring buffer to reset all cpu buffers
5230 */
13292494 5231void ring_buffer_reset(struct trace_buffer *buffer)
7a8e76a3 5232{
b23d7a5f 5233 struct ring_buffer_per_cpu *cpu_buffer;
7a8e76a3
SR
5234 int cpu;
5235
f245b3f2
SRV
5236 /* prevent another thread from changing buffer sizes */
5237 mutex_lock(&buffer->mutex);
5238
b23d7a5f
NP
5239 for_each_buffer_cpu(buffer, cpu) {
5240 cpu_buffer = buffer->buffers[cpu];
5241
5242 atomic_inc(&cpu_buffer->resize_disabled);
5243 atomic_inc(&cpu_buffer->record_disabled);
5244 }
5245
5246 /* Make sure all commits have finished */
5247 synchronize_rcu();
5248
5249 for_each_buffer_cpu(buffer, cpu) {
5250 cpu_buffer = buffer->buffers[cpu];
5251
5252 reset_disabled_cpu_buffer(cpu_buffer);
5253
5254 atomic_dec(&cpu_buffer->record_disabled);
5255 atomic_dec(&cpu_buffer->resize_disabled);
5256 }
f245b3f2
SRV
5257
5258 mutex_unlock(&buffer->mutex);
7a8e76a3 5259}
c4f50183 5260EXPORT_SYMBOL_GPL(ring_buffer_reset);
7a8e76a3
SR
5261
5262/**
 5263 * ring_buffer_empty - is the ring buffer empty?
5264 * @buffer: The ring buffer to test
5265 */
13292494 5266bool ring_buffer_empty(struct trace_buffer *buffer)
7a8e76a3
SR
5267{
5268 struct ring_buffer_per_cpu *cpu_buffer;
d4788207 5269 unsigned long flags;
289a5a25 5270 bool dolock;
7a8e76a3 5271 int cpu;
d4788207 5272 int ret;
7a8e76a3
SR
5273
5274 /* yes this is racy, but if you don't like the race, lock the buffer */
5275 for_each_buffer_cpu(buffer, cpu) {
5276 cpu_buffer = buffer->buffers[cpu];
8d707e8e 5277 local_irq_save(flags);
289a5a25 5278 dolock = rb_reader_lock(cpu_buffer);
d4788207 5279 ret = rb_per_cpu_empty(cpu_buffer);
289a5a25 5280 rb_reader_unlock(cpu_buffer, dolock);
8d707e8e
SR
5281 local_irq_restore(flags);
5282
d4788207 5283 if (!ret)
3d4e204d 5284 return false;
7a8e76a3 5285 }
554f786e 5286
3d4e204d 5287 return true;
7a8e76a3 5288}
c4f50183 5289EXPORT_SYMBOL_GPL(ring_buffer_empty);
7a8e76a3
SR
5290
5291/**
5292 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5293 * @buffer: The ring buffer
5294 * @cpu: The CPU buffer to test
5295 */
13292494 5296bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
7a8e76a3
SR
5297{
5298 struct ring_buffer_per_cpu *cpu_buffer;
d4788207 5299 unsigned long flags;
289a5a25 5300 bool dolock;
8aabee57 5301 int ret;
7a8e76a3 5302
9e01c1b7 5303 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3d4e204d 5304 return true;
7a8e76a3
SR
5305
5306 cpu_buffer = buffer->buffers[cpu];
8d707e8e 5307 local_irq_save(flags);
289a5a25 5308 dolock = rb_reader_lock(cpu_buffer);
554f786e 5309 ret = rb_per_cpu_empty(cpu_buffer);
289a5a25 5310 rb_reader_unlock(cpu_buffer, dolock);
8d707e8e 5311 local_irq_restore(flags);
554f786e
SR
5312
5313 return ret;
7a8e76a3 5314}
c4f50183 5315EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
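/*
 * Illustrative sketch (not part of this file): draining one CPU buffer
 * with the consuming read API.  ring_buffer_empty_cpu() is only a racy
 * peek, so the loop relies on ring_buffer_consume() returning NULL to
 * stop; the handler callback is hypothetical and the lost-event count is
 * fetched but ignored here.
 */
static void example_drain_cpu(struct trace_buffer *buffer, int cpu,
			      void (*handle)(void *data, u64 ts))
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;

	if (ring_buffer_empty_cpu(buffer, cpu))
		return;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
		handle(ring_buffer_event_data(event), ts);
}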
7a8e76a3 5316
85bac32c 5317#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
7a8e76a3
SR
5318/**
5319 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
5320 * @buffer_a: One buffer to swap with
5321 * @buffer_b: The other buffer to swap with
59e7cffe 5322 * @cpu: the CPU of the buffers to swap
7a8e76a3
SR
5323 *
5324 * This function is useful for tracers that want to take a "snapshot"
 5325 * of a CPU buffer and have another backup buffer lying around.
 5326 * It is expected that the tracer handles the cpu buffer not being
5327 * used at the moment.
5328 */
13292494
SRV
5329int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
5330 struct trace_buffer *buffer_b, int cpu)
7a8e76a3
SR
5331{
5332 struct ring_buffer_per_cpu *cpu_buffer_a;
5333 struct ring_buffer_per_cpu *cpu_buffer_b;
554f786e
SR
5334 int ret = -EINVAL;
5335
9e01c1b7
RR
5336 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
5337 !cpumask_test_cpu(cpu, buffer_b->cpumask))
554f786e 5338 goto out;
7a8e76a3 5339
438ced17
VN
5340 cpu_buffer_a = buffer_a->buffers[cpu];
5341 cpu_buffer_b = buffer_b->buffers[cpu];
5342
7a8e76a3 5343 /* At least make sure the two buffers are somewhat the same */
438ced17 5344 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
554f786e
SR
5345 goto out;
5346
5347 ret = -EAGAIN;
7a8e76a3 5348
97b17efe 5349 if (atomic_read(&buffer_a->record_disabled))
554f786e 5350 goto out;
97b17efe
SR
5351
5352 if (atomic_read(&buffer_b->record_disabled))
554f786e 5353 goto out;
97b17efe 5354
97b17efe 5355 if (atomic_read(&cpu_buffer_a->record_disabled))
554f786e 5356 goto out;
97b17efe
SR
5357
5358 if (atomic_read(&cpu_buffer_b->record_disabled))
554f786e 5359 goto out;
97b17efe 5360
7a8e76a3 5361 /*
74401729 5362 * We can't do a synchronize_rcu here because this
7a8e76a3
SR
5363 * function can be called in atomic context.
5364 * Normally this will be called from the same CPU as cpu.
5365 * If not it's up to the caller to protect this.
5366 */
5367 atomic_inc(&cpu_buffer_a->record_disabled);
5368 atomic_inc(&cpu_buffer_b->record_disabled);
5369
98277991
SR
5370 ret = -EBUSY;
5371 if (local_read(&cpu_buffer_a->committing))
5372 goto out_dec;
5373 if (local_read(&cpu_buffer_b->committing))
5374 goto out_dec;
5375
7a8e76a3
SR
5376 buffer_a->buffers[cpu] = cpu_buffer_b;
5377 buffer_b->buffers[cpu] = cpu_buffer_a;
5378
5379 cpu_buffer_b->buffer = buffer_a;
5380 cpu_buffer_a->buffer = buffer_b;
5381
98277991
SR
5382 ret = 0;
5383
5384out_dec:
7a8e76a3
SR
5385 atomic_dec(&cpu_buffer_a->record_disabled);
5386 atomic_dec(&cpu_buffer_b->record_disabled);
554f786e 5387out:
554f786e 5388 return ret;
7a8e76a3 5389}
c4f50183 5390EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
85bac32c 5391#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
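/*
 * Illustrative sketch (not part of this file, assumes
 * CONFIG_RING_BUFFER_ALLOW_SWAP): a "snapshot this CPU" helper of the
 * kind the kernel-doc above has in mind.  The tracer owns a spare buffer
 * with the same number of pages per CPU; after a successful swap the
 * captured events live in @snapshot while @live keeps recording.  All
 * names and the retry policy are hypothetical.
 */
static int example_snapshot_cpu(struct trace_buffer *live,
				struct trace_buffer *snapshot, int cpu)
{
	int ret = ring_buffer_swap_cpu(snapshot, live, cpu);

	/*
	 * -EBUSY means a commit was in flight, -EAGAIN that recording is
	 * disabled on one of the buffers; both mean "try again later".
	 */
	if (ret == -EBUSY || ret == -EAGAIN)
		pr_debug("snapshot of cpu %d not taken (%d)\n", cpu, ret);

	return ret;
}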
7a8e76a3 5392
8789a9e7
SR
5393/**
5394 * ring_buffer_alloc_read_page - allocate a page to read from buffer
5395 * @buffer: the buffer to allocate for.
d611851b 5396 * @cpu: the cpu buffer to allocate.
8789a9e7
SR
5397 *
5398 * This function is used in conjunction with ring_buffer_read_page.
5399 * When reading a full page from the ring buffer, these functions
5400 * can be used to speed up the process. The calling function should
5401 * allocate a few pages first with this function. Then when it
5402 * needs to get pages from the ring buffer, it passes the result
5403 * of this function into ring_buffer_read_page, which will swap
5404 * the page that was allocated, with the read page of the buffer.
5405 *
5406 * Returns:
a7e52ad7 5407 * The page allocated, or ERR_PTR
8789a9e7 5408 */
13292494 5409void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
8789a9e7 5410{
a7e52ad7 5411 struct ring_buffer_per_cpu *cpu_buffer;
73a757e6
SRV
5412 struct buffer_data_page *bpage = NULL;
5413 unsigned long flags;
7ea59064 5414 struct page *page;
8789a9e7 5415
a7e52ad7
SRV
5416 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5417 return ERR_PTR(-ENODEV);
5418
5419 cpu_buffer = buffer->buffers[cpu];
73a757e6
SRV
5420 local_irq_save(flags);
5421 arch_spin_lock(&cpu_buffer->lock);
5422
5423 if (cpu_buffer->free_page) {
5424 bpage = cpu_buffer->free_page;
5425 cpu_buffer->free_page = NULL;
5426 }
5427
5428 arch_spin_unlock(&cpu_buffer->lock);
5429 local_irq_restore(flags);
5430
5431 if (bpage)
5432 goto out;
5433
d7ec4bfe
VN
5434 page = alloc_pages_node(cpu_to_node(cpu),
5435 GFP_KERNEL | __GFP_NORETRY, 0);
7ea59064 5436 if (!page)
a7e52ad7 5437 return ERR_PTR(-ENOMEM);
8789a9e7 5438
7ea59064 5439 bpage = page_address(page);
8789a9e7 5440
73a757e6 5441 out:
ef7a4a16
SR
5442 rb_init_page(bpage);
5443
044fa782 5444 return bpage;
8789a9e7 5445}
d6ce96da 5446EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
8789a9e7
SR
5447
5448/**
5449 * ring_buffer_free_read_page - free an allocated read page
 5450 * @buffer: the buffer the page was allocated for
73a757e6 5451 * @cpu: the cpu buffer the page came from
8789a9e7
SR
5452 * @data: the page to free
5453 *
5454 * Free a page allocated from ring_buffer_alloc_read_page.
5455 */
13292494 5456void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
8789a9e7 5457{
73a757e6
SRV
5458 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5459 struct buffer_data_page *bpage = data;
ae415fa4 5460 struct page *page = virt_to_page(bpage);
73a757e6
SRV
5461 unsigned long flags;
5462
ae415fa4
SRV
5463 /* If the page is still in use someplace else, we can't reuse it */
5464 if (page_ref_count(page) > 1)
5465 goto out;
5466
73a757e6
SRV
5467 local_irq_save(flags);
5468 arch_spin_lock(&cpu_buffer->lock);
5469
5470 if (!cpu_buffer->free_page) {
5471 cpu_buffer->free_page = bpage;
5472 bpage = NULL;
5473 }
5474
5475 arch_spin_unlock(&cpu_buffer->lock);
5476 local_irq_restore(flags);
5477
ae415fa4 5478 out:
73a757e6 5479 free_page((unsigned long)bpage);
8789a9e7 5480}
d6ce96da 5481EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
8789a9e7
SR
5482
5483/**
5484 * ring_buffer_read_page - extract a page from the ring buffer
5485 * @buffer: buffer to extract from
5486 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
ef7a4a16 5487 * @len: amount to extract
8789a9e7
SR
5488 * @cpu: the cpu of the buffer to extract
5489 * @full: should the extraction only happen when the page is full.
5490 *
5491 * This function will pull out a page from the ring buffer and consume it.
5492 * @data_page must be the address of the variable that was returned
5493 * from ring_buffer_alloc_read_page. This is because the page might be used
5494 * to swap with a page in the ring buffer.
5495 *
5496 * for example:
d611851b 5497 * rpage = ring_buffer_alloc_read_page(buffer, cpu);
a7e52ad7
SRV
5498 * if (IS_ERR(rpage))
5499 * return PTR_ERR(rpage);
ef7a4a16 5500 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
667d2412
LJ
5501 * if (ret >= 0)
5502 * process_page(rpage, ret);
8789a9e7
SR
5503 *
5504 * When @full is set, the function will not return true unless
5505 * the writer is off the reader page.
5506 *
5507 * Note: it is up to the calling functions to handle sleeps and wakeups.
5508 * The ring buffer can be used anywhere in the kernel and can not
5509 * blindly call wake_up. The layer that uses the ring buffer must be
5510 * responsible for that.
5511 *
5512 * Returns:
667d2412
LJ
5513 * >=0 if data has been transferred, returns the offset of consumed data.
5514 * <0 if no data has been transferred.
8789a9e7 5515 */
13292494 5516int ring_buffer_read_page(struct trace_buffer *buffer,
ef7a4a16 5517 void **data_page, size_t len, int cpu, int full)
8789a9e7
SR
5518{
5519 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5520 struct ring_buffer_event *event;
044fa782 5521 struct buffer_data_page *bpage;
ef7a4a16 5522 struct buffer_page *reader;
ff0ff84a 5523 unsigned long missed_events;
8789a9e7 5524 unsigned long flags;
ef7a4a16 5525 unsigned int commit;
667d2412 5526 unsigned int read;
4f3640f8 5527 u64 save_timestamp;
667d2412 5528 int ret = -1;
8789a9e7 5529
554f786e
SR
5530 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5531 goto out;
5532
474d32b6
SR
5533 /*
5534 * If len is not big enough to hold the page header, then
5535 * we can not copy anything.
5536 */
5537 if (len <= BUF_PAGE_HDR_SIZE)
554f786e 5538 goto out;
474d32b6
SR
5539
5540 len -= BUF_PAGE_HDR_SIZE;
5541
8789a9e7 5542 if (!data_page)
554f786e 5543 goto out;
8789a9e7 5544
044fa782
SR
5545 bpage = *data_page;
5546 if (!bpage)
554f786e 5547 goto out;
8789a9e7 5548
5389f6fa 5549 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
8789a9e7 5550
ef7a4a16
SR
5551 reader = rb_get_reader_page(cpu_buffer);
5552 if (!reader)
554f786e 5553 goto out_unlock;
8789a9e7 5554
ef7a4a16
SR
5555 event = rb_reader_event(cpu_buffer);
5556
5557 read = reader->read;
5558 commit = rb_page_commit(reader);
667d2412 5559
66a8cb95 5560 /* Check if any events were dropped */
ff0ff84a 5561 missed_events = cpu_buffer->lost_events;
66a8cb95 5562
8789a9e7 5563 /*
474d32b6
SR
5564 * If this page has been partially read or
5565 * if len is not big enough to read the rest of the page or
5566 * a writer is still on the page, then
5567 * we must copy the data from the page to the buffer.
5568 * Otherwise, we can simply swap the page with the one passed in.
8789a9e7 5569 */
474d32b6 5570 if (read || (len < (commit - read)) ||
ef7a4a16 5571 cpu_buffer->reader_page == cpu_buffer->commit_page) {
667d2412 5572 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
474d32b6
SR
5573 unsigned int rpos = read;
5574 unsigned int pos = 0;
ef7a4a16 5575 unsigned int size;
8789a9e7
SR
5576
5577 if (full)
554f786e 5578 goto out_unlock;
8789a9e7 5579
ef7a4a16
SR
5580 if (len > (commit - read))
5581 len = (commit - read);
5582
69d1b839
SR
5583 /* Always keep the time extend and data together */
5584 size = rb_event_ts_length(event);
ef7a4a16
SR
5585
5586 if (len < size)
554f786e 5587 goto out_unlock;
ef7a4a16 5588
4f3640f8
SR
5589 /* save the current timestamp, since the user will need it */
5590 save_timestamp = cpu_buffer->read_stamp;
5591
ef7a4a16
SR
5592 /* Need to copy one event at a time */
5593 do {
e1e35927
DS
5594 /* We need the size of one event, because
5595 * rb_advance_reader only advances by one event,
5596 * whereas rb_event_ts_length may include the size of
5597 * one or two events.
5598 * We have already ensured there's enough space if this
5599 * is a time extend. */
5600 size = rb_event_length(event);
474d32b6 5601 memcpy(bpage->data + pos, rpage->data + rpos, size);
ef7a4a16
SR
5602
5603 len -= size;
5604
5605 rb_advance_reader(cpu_buffer);
474d32b6
SR
5606 rpos = reader->read;
5607 pos += size;
ef7a4a16 5608
18fab912
HY
5609 if (rpos >= commit)
5610 break;
5611
ef7a4a16 5612 event = rb_reader_event(cpu_buffer);
69d1b839
SR
5613 /* Always keep the time extend and data together */
5614 size = rb_event_ts_length(event);
e1e35927 5615 } while (len >= size);
667d2412
LJ
5616
5617 /* update bpage */
ef7a4a16 5618 local_set(&bpage->commit, pos);
4f3640f8 5619 bpage->time_stamp = save_timestamp;
ef7a4a16 5620
474d32b6
SR
5621 /* we copied everything to the beginning */
5622 read = 0;
8789a9e7 5623 } else {
afbab76a 5624 /* update the entry counter */
77ae365e 5625 cpu_buffer->read += rb_page_entries(reader);
c64e148a 5626 cpu_buffer->read_bytes += BUF_PAGE_SIZE;
afbab76a 5627
8789a9e7 5628 /* swap the pages */
044fa782 5629 rb_init_page(bpage);
ef7a4a16
SR
5630 bpage = reader->page;
5631 reader->page = *data_page;
5632 local_set(&reader->write, 0);
778c55d4 5633 local_set(&reader->entries, 0);
ef7a4a16 5634 reader->read = 0;
044fa782 5635 *data_page = bpage;
ff0ff84a
SR
5636
5637 /*
5638 * Use the real_end for the data size,
5639 * This gives us a chance to store the lost events
5640 * on the page.
5641 */
5642 if (reader->real_end)
5643 local_set(&bpage->commit, reader->real_end);
8789a9e7 5644 }
667d2412 5645 ret = read;
8789a9e7 5646
66a8cb95 5647 cpu_buffer->lost_events = 0;
2711ca23
SR
5648
5649 commit = local_read(&bpage->commit);
66a8cb95
SR
5650 /*
5651 * Set a flag in the commit field if we lost events
5652 */
ff0ff84a 5653 if (missed_events) {
ff0ff84a
SR
5654 /* If there is room at the end of the page to save the
5655 * missed events, then record it there.
5656 */
5657 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
5658 memcpy(&bpage->data[commit], &missed_events,
5659 sizeof(missed_events));
5660 local_add(RB_MISSED_STORED, &bpage->commit);
2711ca23 5661 commit += sizeof(missed_events);
ff0ff84a 5662 }
66a8cb95 5663 local_add(RB_MISSED_EVENTS, &bpage->commit);
ff0ff84a 5664 }
66a8cb95 5665
2711ca23
SR
5666 /*
5667 * This page may be off to user land. Zero it out here.
5668 */
5669 if (commit < BUF_PAGE_SIZE)
5670 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
5671
554f786e 5672 out_unlock:
5389f6fa 5673 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
8789a9e7 5674
554f786e 5675 out:
8789a9e7
SR
5676 return ret;
5677}
d6ce96da 5678EXPORT_SYMBOL_GPL(ring_buffer_read_page);
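/*
 * Illustrative sketch (not part of this file): the allocate/read/free
 * cycle for pulling whole pages out of one CPU buffer, expanding on the
 * snippet in the kernel-doc above.  The consume_page() callback is
 * hypothetical; its @offset argument is the read offset returned by
 * ring_buffer_read_page().
 */
static int example_read_pages(struct trace_buffer *buffer, int cpu,
			      void (*consume_page)(void *page, int offset))
{
	void *page;
	int ret;

	page = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* Keep pulling pages until no more data is available */
	while ((ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE,
					    cpu, 0)) >= 0)
		consume_page(page, ret);

	ring_buffer_free_read_page(buffer, cpu, page);
	return 0;
}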
8789a9e7 5679
b32614c0
SAS
5680/*
5681 * We only allocate new buffers, never free them if the CPU goes down.
5682 * If we were to free the buffer, then the user would lose any trace that was in
5683 * the buffer.
5684 */
5685int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
554f786e 5686{
13292494 5687 struct trace_buffer *buffer;
9b94a8fb
SRRH
5688 long nr_pages_same;
5689 int cpu_i;
5690 unsigned long nr_pages;
554f786e 5691
13292494 5692 buffer = container_of(node, struct trace_buffer, node);
b32614c0
SAS
5693 if (cpumask_test_cpu(cpu, buffer->cpumask))
5694 return 0;
5695
5696 nr_pages = 0;
5697 nr_pages_same = 1;
5698 /* check if all cpu sizes are same */
5699 for_each_buffer_cpu(buffer, cpu_i) {
5700 /* fill in the size from first enabled cpu */
5701 if (nr_pages == 0)
5702 nr_pages = buffer->buffers[cpu_i]->nr_pages;
5703 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
5704 nr_pages_same = 0;
5705 break;
554f786e 5706 }
554f786e 5707 }
b32614c0
SAS
5708 /* allocate minimum pages, user can later expand it */
5709 if (!nr_pages_same)
5710 nr_pages = 2;
5711 buffer->buffers[cpu] =
5712 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
5713 if (!buffer->buffers[cpu]) {
5714 WARN(1, "failed to allocate ring buffer on CPU %u\n",
5715 cpu);
5716 return -ENOMEM;
5717 }
5718 smp_wmb();
5719 cpumask_set_cpu(cpu, buffer->cpumask);
5720 return 0;
554f786e 5721}
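/*
 * Illustrative sketch (not part of this file): how a multi-instance CPU
 * hotplug callback of this shape is typically wired up.  In the real
 * kernel the state is registered once by the tracing core and every ring
 * buffer then adds itself as an instance; folding both steps into one
 * hypothetical helper here is purely for illustration.
 */
static int example_register_hotplug(struct trace_buffer *buffer)
{
	int ret;

	/* One-time setup of the callback for the hotplug state */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare",
				      trace_rb_cpu_prepare, NULL);
	if (ret < 0)
		return ret;

	/* Attach this buffer so newly onlined CPUs get a per CPU buffer */
	return cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE,
					&buffer->node);
}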
6c43e554
SRRH
5722
5723#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
5724/*
5725 * This is a basic integrity check of the ring buffer.
5726 * Late in the boot cycle this test will run when configured in.
5727 * It will kick off a thread per CPU that will go into a loop
5728 * writing to the per cpu ring buffer various sizes of data.
5729 * Some of the data will be large items, some small.
5730 *
5731 * Another thread is created that goes into a spin, sending out
5732 * IPIs to the other CPUs to also write into the ring buffer.
 5733 * This is to test the nesting ability of the buffer.
5734 *
5735 * Basic stats are recorded and reported. If something in the
5736 * ring buffer should happen that's not expected, a big warning
5737 * is displayed and all ring buffers are disabled.
5738 */
5739static struct task_struct *rb_threads[NR_CPUS] __initdata;
5740
5741struct rb_test_data {
13292494 5742 struct trace_buffer *buffer;
6c43e554
SRRH
5743 unsigned long events;
5744 unsigned long bytes_written;
5745 unsigned long bytes_alloc;
5746 unsigned long bytes_dropped;
5747 unsigned long events_nested;
5748 unsigned long bytes_written_nested;
5749 unsigned long bytes_alloc_nested;
5750 unsigned long bytes_dropped_nested;
5751 int min_size_nested;
5752 int max_size_nested;
5753 int max_size;
5754 int min_size;
5755 int cpu;
5756 int cnt;
5757};
5758
5759static struct rb_test_data rb_data[NR_CPUS] __initdata;
5760
5761/* 1 meg per cpu */
5762#define RB_TEST_BUFFER_SIZE 1048576
5763
5764static char rb_string[] __initdata =
5765 "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
5766 "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
5767 "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
5768
5769static bool rb_test_started __initdata;
5770
5771struct rb_item {
5772 int size;
5773 char str[];
5774};
5775
5776static __init int rb_write_something(struct rb_test_data *data, bool nested)
5777{
5778 struct ring_buffer_event *event;
5779 struct rb_item *item;
5780 bool started;
5781 int event_len;
5782 int size;
5783 int len;
5784 int cnt;
5785
 5786 /* Have nested writes different than what is written */
5787 cnt = data->cnt + (nested ? 27 : 0);
5788
5789 /* Multiply cnt by ~e, to make some unique increment */
40ed29b3 5790 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
6c43e554
SRRH
5791
5792 len = size + sizeof(struct rb_item);
5793
5794 started = rb_test_started;
5795 /* read rb_test_started before checking buffer enabled */
5796 smp_rmb();
5797
5798 event = ring_buffer_lock_reserve(data->buffer, len);
5799 if (!event) {
5800 /* Ignore dropped events before test starts. */
5801 if (started) {
5802 if (nested)
5803 data->bytes_dropped += len;
5804 else
5805 data->bytes_dropped_nested += len;
5806 }
5807 return len;
5808 }
5809
5810 event_len = ring_buffer_event_length(event);
5811
5812 if (RB_WARN_ON(data->buffer, event_len < len))
5813 goto out;
5814
5815 item = ring_buffer_event_data(event);
5816 item->size = size;
5817 memcpy(item->str, rb_string, size);
5818
5819 if (nested) {
5820 data->bytes_alloc_nested += event_len;
5821 data->bytes_written_nested += len;
5822 data->events_nested++;
5823 if (!data->min_size_nested || len < data->min_size_nested)
5824 data->min_size_nested = len;
5825 if (len > data->max_size_nested)
5826 data->max_size_nested = len;
5827 } else {
5828 data->bytes_alloc += event_len;
5829 data->bytes_written += len;
5830 data->events++;
5831 if (!data->min_size || len < data->min_size)
 5832 data->min_size = len;
5833 if (len > data->max_size)
5834 data->max_size = len;
5835 }
5836
5837 out:
5838 ring_buffer_unlock_commit(data->buffer, event);
5839
5840 return 0;
5841}
5842
5843static __init int rb_test(void *arg)
5844{
5845 struct rb_test_data *data = arg;
5846
5847 while (!kthread_should_stop()) {
5848 rb_write_something(data, false);
5849 data->cnt++;
5850
5851 set_current_state(TASK_INTERRUPTIBLE);
5852 /* Now sleep between a min of 100-300us and a max of 1ms */
5853 usleep_range(((data->cnt % 3) + 1) * 100, 1000);
5854 }
5855
5856 return 0;
5857}
5858
5859static __init void rb_ipi(void *ignore)
5860{
5861 struct rb_test_data *data;
5862 int cpu = smp_processor_id();
5863
5864 data = &rb_data[cpu];
5865 rb_write_something(data, true);
5866}
5867
5868static __init int rb_hammer_test(void *arg)
5869{
5870 while (!kthread_should_stop()) {
5871
5872 /* Send an IPI to all cpus to write data! */
5873 smp_call_function(rb_ipi, NULL, 1);
5874 /* No sleep, but for non preempt, let others run */
5875 schedule();
5876 }
5877
5878 return 0;
5879}
5880
5881static __init int test_ringbuffer(void)
5882{
5883 struct task_struct *rb_hammer;
13292494 5884 struct trace_buffer *buffer;
6c43e554
SRRH
5885 int cpu;
5886 int ret = 0;
5887
a356646a 5888 if (security_locked_down(LOCKDOWN_TRACEFS)) {
ee195452 5889 pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
a356646a
SRV
5890 return 0;
5891 }
5892
6c43e554
SRRH
5893 pr_info("Running ring buffer tests...\n");
5894
5895 buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
5896 if (WARN_ON(!buffer))
5897 return 0;
5898
5899 /* Disable buffer so that threads can't write to it yet */
5900 ring_buffer_record_off(buffer);
5901
5902 for_each_online_cpu(cpu) {
5903 rb_data[cpu].buffer = buffer;
5904 rb_data[cpu].cpu = cpu;
5905 rb_data[cpu].cnt = cpu;
5906 rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
5907 "rbtester/%d", cpu);
62277de7 5908 if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
6c43e554 5909 pr_cont("FAILED\n");
62277de7 5910 ret = PTR_ERR(rb_threads[cpu]);
6c43e554
SRRH
5911 goto out_free;
5912 }
5913
5914 kthread_bind(rb_threads[cpu], cpu);
5915 wake_up_process(rb_threads[cpu]);
5916 }
5917
5918 /* Now create the rb hammer! */
5919 rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
62277de7 5920 if (WARN_ON(IS_ERR(rb_hammer))) {
6c43e554 5921 pr_cont("FAILED\n");
62277de7 5922 ret = PTR_ERR(rb_hammer);
6c43e554
SRRH
5923 goto out_free;
5924 }
5925
5926 ring_buffer_record_on(buffer);
5927 /*
5928 * Show buffer is enabled before setting rb_test_started.
5929 * Yes there's a small race window where events could be
 5930 * dropped and the thread won't catch it. But when a ring
5931 * buffer gets enabled, there will always be some kind of
5932 * delay before other CPUs see it. Thus, we don't care about
5933 * those dropped events. We care about events dropped after
5934 * the threads see that the buffer is active.
5935 */
5936 smp_wmb();
5937 rb_test_started = true;
5938
5939 set_current_state(TASK_INTERRUPTIBLE);
 5940 /* Just run for 10 seconds */
5941 schedule_timeout(10 * HZ);
5942
5943 kthread_stop(rb_hammer);
5944
5945 out_free:
5946 for_each_online_cpu(cpu) {
5947 if (!rb_threads[cpu])
5948 break;
5949 kthread_stop(rb_threads[cpu]);
5950 }
5951 if (ret) {
5952 ring_buffer_free(buffer);
5953 return ret;
5954 }
5955
5956 /* Report! */
5957 pr_info("finished\n");
5958 for_each_online_cpu(cpu) {
5959 struct ring_buffer_event *event;
5960 struct rb_test_data *data = &rb_data[cpu];
5961 struct rb_item *item;
5962 unsigned long total_events;
5963 unsigned long total_dropped;
5964 unsigned long total_written;
5965 unsigned long total_alloc;
5966 unsigned long total_read = 0;
5967 unsigned long total_size = 0;
5968 unsigned long total_len = 0;
5969 unsigned long total_lost = 0;
5970 unsigned long lost;
5971 int big_event_size;
5972 int small_event_size;
5973
5974 ret = -1;
5975
5976 total_events = data->events + data->events_nested;
5977 total_written = data->bytes_written + data->bytes_written_nested;
5978 total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
5979 total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
5980
5981 big_event_size = data->max_size + data->max_size_nested;
5982 small_event_size = data->min_size + data->min_size_nested;
5983
5984 pr_info("CPU %d:\n", cpu);
5985 pr_info(" events: %ld\n", total_events);
5986 pr_info(" dropped bytes: %ld\n", total_dropped);
5987 pr_info(" alloced bytes: %ld\n", total_alloc);
5988 pr_info(" written bytes: %ld\n", total_written);
5989 pr_info(" biggest event: %d\n", big_event_size);
5990 pr_info(" smallest event: %d\n", small_event_size);
5991
5992 if (RB_WARN_ON(buffer, total_dropped))
5993 break;
5994
5995 ret = 0;
5996
5997 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
5998 total_lost += lost;
5999 item = ring_buffer_event_data(event);
6000 total_len += ring_buffer_event_length(event);
6001 total_size += item->size + sizeof(struct rb_item);
6002 if (memcmp(&item->str[0], rb_string, item->size) != 0) {
6003 pr_info("FAILED!\n");
6004 pr_info("buffer had: %.*s\n", item->size, item->str);
6005 pr_info("expected: %.*s\n", item->size, rb_string);
6006 RB_WARN_ON(buffer, 1);
6007 ret = -1;
6008 break;
6009 }
6010 total_read++;
6011 }
6012 if (ret)
6013 break;
6014
6015 ret = -1;
6016
6017 pr_info(" read events: %ld\n", total_read);
6018 pr_info(" lost events: %ld\n", total_lost);
6019 pr_info(" total events: %ld\n", total_lost + total_read);
6020 pr_info(" recorded len bytes: %ld\n", total_len);
6021 pr_info(" recorded size bytes: %ld\n", total_size);
6022 if (total_lost)
6023 pr_info(" With dropped events, record len and size may not match\n"
6024 " alloced and written from above\n");
6025 if (!total_lost) {
6026 if (RB_WARN_ON(buffer, total_len != total_alloc ||
6027 total_size != total_written))
6028 break;
6029 }
6030 if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
6031 break;
6032
6033 ret = 0;
6034 }
6035 if (!ret)
6036 pr_info("Ring buffer PASSED!\n");
6037
6038 ring_buffer_free(buffer);
6039 return 0;
6040}
6041
6042late_initcall(test_ringbuffer);
6043#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */