/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

void free_cpu_buffers(void)
{
	int i;

	for_each_online_cpu(i)
		vfree(per_cpu(cpu_buffer, i).buffer);
}

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = fs_cpu_buffer_size;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
					 cpu_to_node(i));
		if (!b->buffer)
			goto fail;

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->tail_pos = 0;
		b->head_pos = 0;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}

void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}

	flush_scheduled_work();
}

/* Resets the cpu buffer to a sane state. */
void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf)
{
	/*
	 * Reset these to invalid values; the next sample collected
	 * will populate the buffer with proper values to initialize
	 * the buffer.
	 */
	cpu_buf->last_is_kernel = -1;
	cpu_buf->last_task = NULL;
}

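/*
 * The cpu buffer is a single-producer/single-consumer ring: head_pos is
 * only advanced by the sampling side (add_sample() below) and tail_pos
 * only by the reader, sync_buffer(), which is why the head/tail split
 * needs no locking.  One slot is always kept free so that
 * head_pos == tail_pos unambiguously means "empty"; for example, with
 * buffer_size 8, head_pos 5 and tail_pos 2, three entries are queued and
 * four (not five) slots are reported as available.
 */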
/* compute number of available slots in cpu_buffer queue */
static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
{
	unsigned long head = b->head_pos;
	unsigned long tail = b->tail_pos;

	if (tail > head)
		return (tail - head) - 1;

	return tail + (b->buffer_size - head) - 1;
}

static void increment_head(struct oprofile_cpu_buffer *b)
{
	unsigned long new_head = b->head_pos + 1;

	/*
	 * Ensure anything written to the slot before we
	 * increment is visible.
	 */
	wmb();

	if (new_head < b->buffer_size)
		b->head_pos = new_head;
	else
		b->head_pos = 0;
}

static inline void
add_sample(struct oprofile_cpu_buffer *cpu_buf,
	   unsigned long pc, unsigned long event)
{
	struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos];
	entry->eip = pc;
	entry->event = event;
	increment_head(cpu_buf);
}

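/*
 * Control records share the sample format: add_code() stores ESCAPE_CODE
 * in the eip slot and the code value (kernel/user flag, task pointer,
 * CPU_TRACE_BEGIN or an IBS code) in the event slot, so the reader can
 * tell them apart from ordinary pc/event pairs.
 */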
static inline void
add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
{
	add_sample(buffer, ESCAPE_CODE, value);
}

/* This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the CPU buffer.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
		      int is_kernel, unsigned long event)
{
	struct task_struct *task;

	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

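	/*
	 * In the worst case a sample needs three slots: a kernel/user
	 * transition code, a task switch code, and the pc/event pair itself.
	 */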
	if (nr_available_slots(cpu_buf) < 3) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	is_kernel = !!is_kernel;

	task = current;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		add_code(cpu_buf, is_kernel);
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		add_code(cpu_buf, (unsigned long)task);
	}

	add_sample(cpu_buf, pc, event);
	return 1;
}

static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
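	/*
	 * Four free slots are demanded here, presumably one for the
	 * CPU_TRACE_BEGIN escape code plus the up-to-three slots a
	 * following log_sample() may need.
	 */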
	if (nr_available_slots(cpu_buf) < 4) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	add_code(cpu_buf, CPU_TRACE_BEGIN);
	cpu_buf->tracing = 1;
	return 1;
}

static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!backtrace_depth) {
		log_sample(cpu_buf, pc, is_kernel, event);
		return;
	}

	if (!oprofile_begin_trace(cpu_buf))
		return;

	/*
	 * If log_sample() fails we can't backtrace since we lost the
	 * source of this event.
	 */
	if (log_sample(cpu_buf, pc, is_kernel, event))
		oprofile_ops.backtrace(regs, backtrace_depth);
	oprofile_end_trace(cpu_buf);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

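/*
 * An IBS record is logged as an escape code carrying ibs_code, followed by
 * three buffer entries holding ibs[0..5] for a fetch sample, or six entries
 * holding ibs[0..11] for an op sample (IBS_OP_BEGIN).  Together with the
 * possible kernel/user and task switch codes this stays well within
 * MAX_IBS_SAMPLE_SIZE slots.
 */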
#define MAX_IBS_SAMPLE_SIZE 14
static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
	unsigned long pc, int is_kernel, unsigned int *ibs, int ibs_code)
{
	struct task_struct *task;

	cpu_buf->sample_received++;

	if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	is_kernel = !!is_kernel;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		add_code(cpu_buf, is_kernel);
	}

	/* notice a task switch */
	if (!is_kernel) {
		task = current;

		if (cpu_buf->last_task != task) {
			cpu_buf->last_task = task;
			add_code(cpu_buf, (unsigned long)task);
		}
	}

	add_code(cpu_buf, ibs_code);
	add_sample(cpu_buf, ibs[0], ibs[1]);
	add_sample(cpu_buf, ibs[2], ibs[3]);
	add_sample(cpu_buf, ibs[4], ibs[5]);

	if (ibs_code == IBS_OP_BEGIN) {
		add_sample(cpu_buf, ibs[6], ibs[7]);
		add_sample(cpu_buf, ibs[8], ibs[9]);
		add_sample(cpu_buf, ibs[10], ibs[11]);
	}

	return 1;
}

void oprofile_add_ibs_sample(struct pt_regs * const regs,
			     unsigned int * const ibs_sample, u8 code)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	struct oprofile_cpu_buffer *cpu_buf =
		&per_cpu(cpu_buffer, smp_processor_id());

	if (!backtrace_depth) {
		log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code);
		return;
	}

	/*
	 * If log_ibs_sample() fails we can't backtrace since we lost the
	 * source of this event.
	 */
	if (log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code))
		oprofile_ops.backtrace(regs, backtrace_depth);
}

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	log_sample(cpu_buf, pc, is_kernel, event);
}

void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	if (nr_available_slots(cpu_buf) < 1) {
		cpu_buf->tracing = 0;
		cpu_buf->sample_lost_overflow++;
		return;
	}

	/*
	 * A broken frame can give an eip with the same value as an
	 * escape code; abort the trace if we get it.
	 */
	if (pc == ESCAPE_CODE) {
		cpu_buf->tracing = 0;
		cpu_buf->backtrace_aborted++;
		return;
	}

	add_sample(cpu_buf, pc, 0);
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu.
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}