/*
 * Internal ring-buffer and callchain definitions for the perf events
 * subsystem (kernel/events).
 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>
9251f904 BP |
6 | |
7 | /* Buffer handling */ | |
8 | ||
76369139 FW |
9 | #define RING_BUFFER_WRITABLE 0x01 |
10 | ||
11 | struct ring_buffer { | |
12 | atomic_t refcount; | |
13 | struct rcu_head rcu_head; | |
14 | #ifdef CONFIG_PERF_USE_VMALLOC | |
15 | struct work_struct work; | |
16 | int page_order; /* allocation order */ | |
17 | #endif | |
18 | int nr_pages; /* nr of data pages */ | |
19 | int writable; /* are we writable */ | |
20 | ||
21 | atomic_t poll; /* POLL_ for wakeups */ | |
22 | ||
23 | local_t head; /* write position */ | |
24 | local_t nest; /* nested writers */ | |
25 | local_t events; /* event limit */ | |
26 | local_t wakeup; /* wakeup stamp */ | |
27 | local_t lost; /* nr records lost */ | |
28 | ||
29 | long watermark; /* wakeup watermark */ | |
10c6db11 PZ |
30 | /* poll crap */ |
31 | spinlock_t event_lock; | |
32 | struct list_head event_list; | |
76369139 FW |
33 | |
34 | struct perf_event_mmap_page *user_page; | |
35 | void *data_pages[0]; | |
36 | }; | |
37 | ||
/* Ring-buffer lifetime; definitions live in the ring-buffer code. */
extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

/*
 * Helpers for emitting the optional sample-ID trailer on non-sample
 * records: __init_id() sizes the header, __output_id_sample() writes
 * the data computed there.
 */
extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

/* Translate an mmap() page offset into the backing page of @rb. */
extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

/* Allocation order of the (single) vmalloc'ed data area. */
static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

/* Page-backed buffers always use order-0 pages. */
static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif
74 | ||
9251f904 | 75 | static inline unsigned long perf_data_size(struct ring_buffer *rb) |
76369139 FW |
76 | { |
77 | return rb->nr_pages << (PAGE_SHIFT + page_order(rb)); | |
78 | } | |
79 | ||
/*
 * DEFINE_OUTPUT_COPY - generate a copy-into-ring-buffer routine.
 *
 * The generated function copies @len bytes from @buf into the output
 * handle's current position, advancing to the next data page whenever
 * the current one fills up, and returns the number of bytes it could
 * NOT copy (0 on full success).
 *
 * Contract: @memcpy_func must return the number of bytes it actually
 * wrote — that value drives the pointer/size bookkeeping below, and a
 * short write (written != size) terminates the loop early.
 *
 * The page-wrap masking (handle->page &= rb->nr_pages - 1) assumes
 * nr_pages is a power of two — NOTE(review): enforced by the allocator,
 * presumably; confirm against rb_alloc().
 */
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned int						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned int len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min_t(unsigned long, handle->size, len);		\
									\
		written = memcpy_func(handle->addr, buf, size);		\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

/*
 * Kernel-space copy helper for DEFINE_OUTPUT_COPY(): plain memcpy()
 * never fails, so the number of bytes written is always @n.
 */
static inline int memcpy_common(void *dst, const void *src, size_t n)
{
	(void)memcpy(dst, src, n);

	return n;
}

91d7753a FW |
115 | DEFINE_OUTPUT_COPY(__output_copy, memcpy_common) |
116 | ||
117 | #ifndef arch_perf_out_copy_user | |
118 | #define arch_perf_out_copy_user __copy_from_user_inatomic | |
119 | #endif | |
120 | ||
121 | DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user) | |
122 | ||
/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
/* Refcounted setup/teardown of the per-context callchain buffers. */
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

/*
 * Claim the recursion slot for the current execution context.
 *
 * Slots: 3 = NMI, 2 = hardirq, 1 = softirq, 0 = task context.
 * Returns the claimed slot index, or -1 if this context is already
 * active (recursion detected).  Pair with put_recursion_context().
 */
static inline int get_recursion_context(int *recursion)
{
	int rctx = in_nmi() ? 3 : in_irq() ? 2 : in_softirq() ? 1 : 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}


/*
 * Release the recursion slot taken by get_recursion_context().
 * The barrier() keeps the compiler from sinking preceding work past
 * the release of the slot.
 */
static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

#endif /* _KERNEL_EVENTS_INTERNAL_H */