]>
Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
76369139 FW |
2 | #ifndef _KERNEL_EVENTS_INTERNAL_H |
3 | #define _KERNEL_EVENTS_INTERNAL_H | |
4 | ||
9251f904 | 5 | #include <linux/hardirq.h> |
91d7753a | 6 | #include <linux/uaccess.h> |
fecb8ed2 | 7 | #include <linux/refcount.h> |
9251f904 BP |
8 | |
/*
 * Buffer handling
 */

/* mmap() flag: the ring buffer may be written to by user space. */
#define RING_BUFFER_WRITABLE		0x01
/*
 * Backing store for a perf event's mmap()ed data area, plus the
 * optional AUX area.  Reference-counted and RCU-freed; see rb_free()
 * and rb_free_rcu() below.
 */
struct perf_buffer {
	refcount_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;		/* deferred vfree */
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	unsigned int			nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;	/* AUX-space wakeup watermark */

	/* poll() support */
	spinlock_t			event_lock;
	struct list_head		event_list;

	/* mmap accounting */
	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	long				aux_head;
	unsigned int			aux_nest;	/* nested AUX writers */
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	refcount_t			aux_refcount;
	int				aux_in_sampling;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[];	/* flexible array of data pages */
};
60 | ||
56de4e8f | 61 | extern void rb_free(struct perf_buffer *rb); |
57ffc5ca PZ |
62 | |
63 | static inline void rb_free_rcu(struct rcu_head *rcu_head) | |
64 | { | |
56de4e8f | 65 | struct perf_buffer *rb; |
57ffc5ca | 66 | |
56de4e8f | 67 | rb = container_of(rcu_head, struct perf_buffer, rcu_head); |
57ffc5ca PZ |
68 | rb_free(rb); |
69 | } | |
70 | ||
56de4e8f | 71 | static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause) |
86e7972f WN |
72 | { |
73 | if (!pause && rb->nr_pages) | |
74 | rb->paused = 0; | |
75 | else | |
76 | rb->paused = 1; | |
77 | } | |
78 | ||
/* Allocation and lifetime of the data ring buffer (kernel/events/ring_buffer.c). */
extern struct perf_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
/* Allocate/free the optional AUX area attached to @rb. */
extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct perf_buffer *rb);
/* Acquire/release a reference on an event's ring buffer. */
extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct perf_buffer *rb);
45bfb2e5 | 87 | |
56de4e8f | 88 | static inline bool rb_has_aux(struct perf_buffer *rb) |
45bfb2e5 PZ |
89 | { |
90 | return !!rb->aux_nr_pages; | |
91 | } | |
76369139 | 92 | |
68db7e98 AS |
/*
 * Emit a record for @size bytes of AUX data ending at @head.
 * NOTE(review): defined elsewhere; exact record semantics per the
 * implementation in kernel/events/ring_buffer.c — confirm there.
 */
void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

/* Translate an mmap page offset within @rb to its backing struct page. */
extern struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);
76369139 FW |
98 | |
#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

/* Allocation order of the vmalloc'ed data area. */
static inline int page_order(struct perf_buffer *rb)
{
	return rb->page_order;
}

#else

/* Page-backed buffers always use order-0 allocations. */
static inline int page_order(struct perf_buffer *rb)
{
	return 0;
}
#endif
118 | ||
56de4e8f | 119 | static inline unsigned long perf_data_size(struct perf_buffer *rb) |
76369139 FW |
120 | { |
121 | return rb->nr_pages << (PAGE_SHIFT + page_order(rb)); | |
122 | } | |
123 | ||
/* Size in bytes of the AUX area of @rb (0 when no AUX pages are allocated). */
static inline unsigned long perf_aux_size(struct perf_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}
128 | ||
/*
 * Common copy-loop body shared by the __output_*() helpers below.
 *
 * Copies @len bytes into the output handle, walking the ring-buffer
 * pages as each one fills.  @memcpy_func must return the number of
 * bytes it did NOT copy (memcpy_common()/memcpy_skip() return 0);
 * the loop terminates early on a partial copy.  Expands to a function
 * body returning the number of bytes left uncopied.
 *
 * @advance_buf: whether @buf is advanced as data is consumed (false
 *               for __output_custom(), which passes a running offset
 *               to its callback instead).
 */
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		/* Never copy past the end of the current page. */	\
		size = min(handle->size, len);				\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written; /* bytes actually copied */	\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct perf_buffer *rb = handle->rb;		\
									\
			/* Wrap to the next data page (nr_pages is a power of two). */ \
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}
155 | ||
7e3f977e DB |
/*
 * Define a helper func_name(handle, buf, len) that copies @len bytes
 * from @buf using @memcpy_func; returns the number of bytes that could
 * not be copied.
 */
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)
7e3f977e DB |
161 | |
/*
 * Like __output_copy() but with a caller-supplied copy callback.  The
 * callback receives the running offset (orig_len - len) rather than a
 * moving @buf pointer, hence advance_buf == false.
 */
static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}
7e3f977e | 170 | |
0a196848 PZ |
/*
 * memcpy() adapter matching the DEFINE_OUTPUT_COPY() contract: return
 * the number of bytes NOT copied, which for a plain memcpy() is
 * always zero.
 */
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);

	return 0;
}
177 | ||
91d7753a FW |
178 | DEFINE_OUTPUT_COPY(__output_copy, memcpy_common) |
179 | ||
0a196848 PZ |
/*
 * No-op counterpart of memcpy_common(): report @n bytes as "copied"
 * without touching memory, so __output_skip() merely advances the
 * output handle.
 */
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	(void)dst;	/* intentionally unused */
	(void)src;
	(void)n;

	return 0;
}
5685e0ff | 185 | |
/* __output_skip(): advance the handle by @len bytes without copying. */
DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

/*
 * Default user-memory copy for output: must not fault (we may be in
 * NMI/IRQ context), so page faults are disabled around the inatomic
 * copy.  Returns the number of bytes NOT copied, matching the
 * DEFINE_OUTPUT_COPY() contract.  Architectures may override this.
 */
static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

/* __output_copy_user(): copy @len bytes from user memory @buf. */
DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
205 | ||
9251f904 BP |
206 | static inline int get_recursion_context(int *recursion) |
207 | { | |
09da9c81 PZ |
208 | unsigned int pc = preempt_count(); |
209 | unsigned char rctx = 0; | |
210 | ||
211 | rctx += !!(pc & (NMI_MASK)); | |
212 | rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK)); | |
213 | rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)); | |
9251f904 BP |
214 | |
215 | if (recursion[rctx]) | |
216 | return -1; | |
217 | ||
218 | recursion[rctx]++; | |
219 | barrier(); | |
220 | ||
221 | return rctx; | |
222 | } | |
223 | ||
/*
 * Release the recursion counter claimed by get_recursion_context().
 * The compiler barrier keeps the protected work ordered before the
 * counter release.
 */
static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
229 | ||
c5ebcedb JO |
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
/* The architecture provides user_stack_pointer(), so user stack dumps work. */
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
/* No architecture support: user stack dumps are unavailable. */
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */
245 | ||
76369139 | 246 | #endif /* _KERNEL_EVENTS_INTERNAL_H */ |