#include "trace.h"
+/* Global flag to disable all recording to ring buffers */
+static int ring_buffers_off __read_mostly;
+
+/**
+ * tracing_on - enable all tracing buffers
+ *
+ * This function enables all tracing buffers that may have been
+ * disabled with tracing_off.
+ */
+void tracing_on(void)
+{
+	ring_buffers_off = 0;
+}
+
+/**
+ * tracing_off - turn off all tracing buffers
+ *
+ * This function stops all tracing buffers from recording data.
+ * It does not disable any overhead the tracers themselves may
+ * be causing. This function simply causes all recording to
+ * the ring buffers to fail.
+ */
+void tracing_off(void)
+{
+	ring_buffers_off = 1;
+}
+
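For context, a minimal sketch of how the new flag is meant to be consumed: the record paths check it up front, so reserving an event simply fails while tracing is off. The function below is illustrative only; its name and exact body are assumptions, not part of this patch.

static struct ring_buffer_event *
example_reserve(struct ring_buffer *buffer, unsigned long length)
{
	/* global kill switch, set by tracing_off() */
	if (ring_buffers_off)
		return NULL;

	/* the per-buffer disable count still applies */
	if (atomic_read(&buffer->record_disabled))
		return NULL;

	/* ... the real reservation logic would continue here ... */
	return NULL;
}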
/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0
struct ring_buffer_per_cpu {
int cpu;
struct ring_buffer *buffer;
-	spinlock_t lock;
+	spinlock_t reader_lock;	/* serialize readers */
+	raw_spinlock_t lock;
struct lock_class_key lock_key;
struct list_head pages;
struct buffer_page *head_page; /* read from head */
u64 read_stamp;
};
+/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(buffer, cond) \
-	do {						\
-		if (unlikely(cond)) {			\
-			atomic_inc(&buffer->record_disabled);	\
-			WARN_ON(1);			\
-		}					\
-	} while (0)
+	({						\
+		int _____ret = unlikely(cond);		\
+		if (_____ret) {				\
+			atomic_inc(&buffer->record_disabled);	\
+			WARN_ON(1);			\
+		}					\
+		_____ret;				\
+	})
-
-#define RB_WARN_ON_RET(buffer, cond)			\
-	do {						\
-		if (unlikely(cond)) {			\
-			atomic_inc(&buffer->record_disabled);	\
-			WARN_ON(1);			\
-			return;				\
-		}					\
-	} while (0)
-
-#define RB_WARN_ON_RET_INT(buffer, cond)		\
-	do {						\
-		if (unlikely(cond)) {			\
-			atomic_inc(&buffer->record_disabled);	\
-			WARN_ON(1);			\
-			return -1;			\
-		}					\
-	} while (0)
-
-#define RB_WARN_ON_RET_NULL(buffer, cond)		\
-	do {						\
-		if (unlikely(cond)) {			\
-			atomic_inc(&buffer->record_disabled);	\
-			WARN_ON(1);			\
-			return NULL;			\
-		}					\
-	} while (0)
-
-#define RB_WARN_ON_ONCE(buffer, cond)			\
-	do {						\
-		static int once;			\
-		if (unlikely(cond) && !once) {		\
-			once++;				\
-			atomic_inc(&buffer->record_disabled);	\
-			WARN_ON(1);			\
-		}					\
-	} while (0)
-
-/* buffer must be ring_buffer not per_cpu */
-#define RB_WARN_ON_UNLOCK(buffer, cond)			\
-	do {						\
-		if (unlikely(cond)) {			\
-			mutex_unlock(&buffer->mutex);	\
-			atomic_inc(&buffer->record_disabled);	\
-			WARN_ON(1);			\
-			return -1;			\
-		}					\
-	} while (0)
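Because the reworked RB_WARN_ON() is a GNU statement expression, it evaluates to the tested condition while still incrementing record_disabled and warning, so each caller picks its own bail-out value instead of needing a _RET/_RET_INT/_RET_NULL variant. A hypothetical caller (the function name is illustrative, not from this patch):

static struct buffer_page *
example_first_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	/* warns, stops further recording, and yields the condition */
	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
		return NULL;

	return list_entry(cpu_buffer->pages.next, struct buffer_page, list);
}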
/**
* check_pages - integrity check of buffer pages
struct list_head *head = &cpu_buffer->pages;
struct buffer_page *page, *tmp;
-RB_WARN_ON_RET_INT(cpu_buffer, head->next->prev != head);
-RB_WARN_ON_RET_INT(cpu_buffer, head->prev->next != head);
+if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
+	return -1;
+if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
+	return -1;
list_for_each_entry_safe(page, tmp, head, list) {
-RB_WARN_ON_RET_INT(cpu_buffer,
-		   page->list.next->prev != &page->list);
-RB_WARN_ON_RET_INT(cpu_buffer,
-		   page->list.prev->next != &page->list);
+if (RB_WARN_ON(cpu_buffer,
+	       page->list.next->prev != &page->list))
+	return -1;
+if (RB_WARN_ON(cpu_buffer,
+	       page->list.prev->next != &page->list))
+	return -1;
}
return 0;
cpu_buffer->cpu = cpu;
cpu_buffer->buffer = buffer;
-spin_lock_init(&cpu_buffer->lock);
+spin_lock_init(&cpu_buffer->reader_lock);
+cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
INIT_LIST_HEAD(&cpu_buffer->pages);
page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
synchronize_sched();
for (i = 0; i < nr_pages; i++) {
-RB_WARN_ON_RET(cpu_buffer, list_empty(&cpu_buffer->pages));
+if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
+	return;
p = cpu_buffer->pages.next;
page = list_entry(p, struct buffer_page, list);
list_del_init(&page->list);
free_buffer_page(page);
}
-RB_WARN_ON_RET(cpu_buffer, list_empty(&cpu_buffer->pages));
+if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
+	return;
rb_reset_cpu(cpu_buffer);
synchronize_sched();
for (i = 0; i < nr_pages; i++) {
-RB_WARN_ON_RET(cpu_buffer, list_empty(pages));
+if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
+	return;
p = pages->next;
page = list_entry(p, struct buffer_page, list);
list_del_init(&page->list);
if (size < buffer_size) {
/* easy case, just free pages */
-RB_WARN_ON_UNLOCK(buffer, nr_pages >= buffer->pages);
+if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
+	mutex_unlock(&buffer->mutex);
+	return -1;
+}
rm_pages = buffer->pages - nr_pages;
* add these pages to the cpu_buffers. Otherwise we just free
* them all and return -ENOMEM;
*/
-RB_WARN_ON_UNLOCK(buffer, nr_pages <= buffer->pages);
+if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
+	mutex_unlock(&buffer->mutex);
+	return -1;
+}
+
new_pages = nr_pages - buffer->pages;
for_each_buffer_cpu(buffer, cpu) {
rb_insert_pages(cpu_buffer, &pages, new_pages);
}
-RB_WARN_ON_UNLOCK(buffer, !list_empty(&pages));
+if (RB_WARN_ON(buffer, !list_empty(&pages))) {
+	mutex_unlock(&buffer->mutex);
+	return -1;
+}
out:
buffer->pages = nr_pages;
head += rb_event_length(event)) {
event = __rb_page_index(cpu_buffer->head_page, head);
-RB_WARN_ON_RET(cpu_buffer, rb_null_event(event));
+if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
+	return;
/* Only count data entries */
if (event->type != RINGBUF_TYPE_DATA)
continue;
/* We reserved something on the buffer */
-RB_WARN_ON_RET_NULL(cpu_buffer, write > BUF_PAGE_SIZE);
+if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
+	return NULL;
event = __rb_page_index(tail_page, tail);
rb_update_event(event, type, length);
reader = rb_get_reader_page(cpu_buffer);
/* This function should not be called when buffer is empty */
-RB_WARN_ON_RET(cpu_buffer, !reader);
+if (RB_WARN_ON(cpu_buffer, !reader))
+	return;
event = rb_reader_event(cpu_buffer);
* Check if we are at the end of the buffer.
*/
if (iter->head >= rb_page_size(iter->head_page)) {
-RB_WARN_ON_RET(buffer,
-	       iter->head_page == cpu_buffer->commit_page);
+if (RB_WARN_ON(buffer,
+	       iter->head_page == cpu_buffer->commit_page))
+	return;
rb_inc_iter(iter);
return;
}
* This should not be called to advance the header if we are
* at the tail of the buffer.
*/
-RB_WARN_ON_RET(cpu_buffer,
-	       (iter->head_page == cpu_buffer->commit_page) &&
-	       (iter->head + length > rb_commit_index(cpu_buffer)));
+if (RB_WARN_ON(cpu_buffer,
+	       (iter->head_page == cpu_buffer->commit_page) &&
+	       (iter->head + length > rb_commit_index(cpu_buffer))))
+	return;
rb_update_iter_read_stamp(iter, event);
atomic_inc(&cpu_buffer->record_disabled);
synchronize_sched();
-spin_lock_irqsave(&cpu_buffer->lock, flags);
+spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+__raw_spin_lock(&cpu_buffer->lock);
ring_buffer_iter_reset(iter);
-spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+__raw_spin_unlock(&cpu_buffer->lock);
+spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
return iter;
}
if (!cpu_isset(cpu, buffer->cpumask))
return;
-spin_lock_irqsave(&cpu_buffer->lock, flags);
+spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
+__raw_spin_lock(&cpu_buffer->lock);
rb_reset_cpu(cpu_buffer);
-spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+__raw_spin_unlock(&cpu_buffer->lock);
+
+spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
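The reset and iterator paths above always nest the two locks the same way: reader_lock is taken first with interrupts disabled, the raw spinlock protecting the per-cpu buffer is taken inside it, and they are dropped in reverse order. A condensed sketch of that ordering (the helper name is illustrative, not part of the patch):

static void example_reset_locked(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long flags;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	__raw_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);	/* work done under both locks */

	__raw_spin_unlock(&cpu_buffer->lock);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}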
/**