/**
 * @file buffer_sync.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf
 * @author Robert Richter <robert.richter@amd.com>
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */

#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/sched.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"

static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_t marked_cpus = CPU_MASK_NONE;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);

/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
        unsigned long flags;
        struct task_struct *task = data;
        spin_lock_irqsave(&task_mortuary, flags);
        list_add(&task->tasks, &dying_tasks);
        spin_unlock_irqrestore(&task_mortuary, flags);
        return NOTIFY_OK;
}


/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int
task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
{
        /* To avoid latency problems, we only process the current CPU,
         * hoping that most samples for the task are on this CPU
         */
        sync_buffer(raw_smp_processor_id());
        return 0;
}


/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact, it's a QoI issue
 * only.
 */
static int
munmap_notify(struct notifier_block *self, unsigned long val, void *data)
{
        unsigned long addr = (unsigned long)data;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *mpnt;

        down_read(&mm->mmap_sem);

        mpnt = find_vma(mm, addr);
        if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
                up_read(&mm->mmap_sem);
                /* To avoid latency problems, we only process the current CPU,
                 * hoping that most samples for the task are on this CPU
                 */
                sync_buffer(raw_smp_processor_id());
                return 0;
        }

        up_read(&mm->mmap_sem);
        return 0;
}


/* We need to be told about new modules so we don't attribute to a previously
 * loaded module, or drop the samples on the floor.
 */
static int
module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
        if (val != MODULE_STATE_COMING)
                return 0;

        /* FIXME: should we process all CPU buffers ? */
        mutex_lock(&buffer_mutex);
        add_event_entry(ESCAPE_CODE);
        add_event_entry(MODULE_LOADED_CODE);
        mutex_unlock(&buffer_mutex);
#endif
        return 0;
}


static struct notifier_block task_free_nb = {
        .notifier_call = task_free_notify,
};

static struct notifier_block task_exit_nb = {
        .notifier_call = task_exit_notify,
};

static struct notifier_block munmap_nb = {
        .notifier_call = munmap_notify,
};

static struct notifier_block module_load_nb = {
        .notifier_call = module_load_notify,
};


static void end_sync(void)
{
        end_cpu_work();
        /* make sure we don't leak task structs */
        process_task_mortuary();
        process_task_mortuary();
}


int sync_start(void)
{
        int err;

        start_cpu_work();

        err = task_handoff_register(&task_free_nb);
        if (err)
                goto out1;
        err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
        if (err)
                goto out2;
        err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
        if (err)
                goto out3;
        err = register_module_notifier(&module_load_nb);
        if (err)
                goto out4;

out:
        return err;
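/* on failure, unwind the registrations in reverse order */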
out4:
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
        task_handoff_unregister(&task_free_nb);
out1:
        end_sync();
        goto out;
}


void sync_stop(void)
{
        unregister_module_notifier(&module_load_nb);
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
        task_handoff_unregister(&task_free_nb);
        end_sync();
}


/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
static inline unsigned long fast_get_dcookie(struct path *path)
{
        unsigned long cookie;

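        /* if a cookie has already been allocated for this dentry, the
         * dentry address itself serves as the cookie value and the
         * hash lookup in get_dcookie() can be skipped
         */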
        if (path->dentry->d_cookie)
                return (unsigned long)path->dentry;
        get_dcookie(path, &cookie);
        return cookie;
}


/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications
 */
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
        unsigned long cookie = NO_COOKIE;
        struct vm_area_struct *vma;

        if (!mm)
                goto out;

        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (!vma->vm_file)
                        continue;
                if (!(vma->vm_flags & VM_EXECUTABLE))
                        continue;
                cookie = fast_get_dcookie(&vma->vm_file->f_path);
                break;
        }

out:
        return cookie;
}


/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 */
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
        unsigned long cookie = NO_COOKIE;
        struct vm_area_struct *vma;

        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

                if (addr < vma->vm_start || addr >= vma->vm_end)
                        continue;

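                /* file-backed mapping: report the offset relative to the
                 * start of the mapped file; anonymous mappings fall back
                 * to the raw virtual address below
                 */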
                if (vma->vm_file) {
                        cookie = fast_get_dcookie(&vma->vm_file->f_path);
                        *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
                                vma->vm_start;
                } else {
                        /* must be an anonymous map */
                        *offset = addr;
                }

                break;
        }

        if (!vma)
                cookie = INVALID_COOKIE;

        return cookie;
}

static unsigned long last_cookie = INVALID_COOKIE;

static void add_cpu_switch(int i)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CPU_SWITCH_CODE);
        add_event_entry(i);
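        /* invalidate the cached cookie so the next sample re-emits it */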
        last_cookie = INVALID_COOKIE;
}

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
        add_event_entry(ESCAPE_CODE);
        if (in_kernel)
                add_event_entry(KERNEL_ENTER_SWITCH_CODE);
        else
                add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

static void
add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CTX_SWITCH_CODE);
        add_event_entry(task->pid);
        add_event_entry(cookie);
        /* Another code for daemon back-compat */
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CTX_TGID_CODE);
        add_event_entry(task->tgid);
}


static void add_cookie_switch(unsigned long cookie)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(COOKIE_SWITCH_CODE);
        add_event_entry(cookie);
}


static void add_trace_begin(void)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(TRACE_BEGIN_CODE);
}

#ifdef CONFIG_OPROFILE_IBS

#define IBS_FETCH_CODE_SIZE 2
#define IBS_OP_CODE_SIZE    5

/*
 * Add IBS fetch and op entries to event buffer
 */
static void add_ibs_begin(int cpu, int code, struct mm_struct *mm)
{
        unsigned long pc;
        int i, count;
        unsigned long cookie = 0;
        off_t offset;
        struct op_entry entry;
        struct op_sample *sample;

        sample = op_cpu_buffer_read_entry(&entry, cpu);
        if (!sample)
                return;
        pc = sample->eip;

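        /* on 64-bit, the upper half of the PC is carried in the event
         * field of this first sample
         */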
#ifdef __LP64__
        pc += sample->event << 32;
#endif

        if (mm) {
                cookie = lookup_dcookie(mm, pc, &offset);

                if (cookie == NO_COOKIE)
                        offset = pc;
                if (cookie == INVALID_COOKIE) {
                        atomic_inc(&oprofile_stats.sample_lost_no_mapping);
                        offset = pc;
                }
                if (cookie != last_cookie) {
                        add_cookie_switch(cookie);
                        last_cookie = cookie;
                }
        } else
                offset = pc;

        add_event_entry(ESCAPE_CODE);
        add_event_entry(code);
        add_event_entry(offset);        /* Offset from Dcookie */

        /* we send the Dcookie offset, but send the raw Linear Add also */
        add_event_entry(sample->eip);
        add_event_entry(sample->event);

        if (code == IBS_FETCH_CODE)
                count = IBS_FETCH_CODE_SIZE;    /* IBS FETCH is 2 int64s */
        else
                count = IBS_OP_CODE_SIZE;       /* IBS OP is 5 int64s */

        for (i = 0; i < count; i++) {
                sample = op_cpu_buffer_read_entry(&entry, cpu);
                if (!sample)
                        return;
                add_event_entry(sample->eip);
                add_event_entry(sample->event);
        }

        return;
}

#endif

static inline void add_sample_entry(unsigned long offset, unsigned long event)
{
        add_event_entry(offset);
        add_event_entry(event);
}


/*
 * Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace. Return 0 on failure.
 */
static int
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
{
        unsigned long cookie;
        off_t offset;

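        /* kernel samples need no dcookie translation: the EIP is
         * already a stable kernel virtual address
         */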
        if (in_kernel) {
                add_sample_entry(s->eip, s->event);
                return 1;
        }

        /* add userspace sample */

        if (!mm) {
                atomic_inc(&oprofile_stats.sample_lost_no_mm);
                return 0;
        }

        cookie = lookup_dcookie(mm, s->eip, &offset);

        if (cookie == INVALID_COOKIE) {
                atomic_inc(&oprofile_stats.sample_lost_no_mapping);
                return 0;
        }

        if (cookie != last_cookie) {
                add_cookie_switch(cookie);
                last_cookie = cookie;
        }

        add_sample_entry(offset, s->event);

        return 1;
}


static void release_mm(struct mm_struct *mm)
{
        if (!mm)
                return;
        up_read(&mm->mmap_sem);
        mmput(mm);
}

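/* Pin the task's mm and take mmap_sem for reading; both are dropped
 * by release_mm() once the task's samples have been processed.
 */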
static struct mm_struct *take_tasks_mm(struct task_struct *task)
{
        struct mm_struct *mm = get_task_mm(task);
        if (mm)
                down_read(&mm->mmap_sem);
        return mm;
}


static inline int is_code(unsigned long val)
{
        return val == ESCAPE_CODE;
}


/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
        unsigned long flags;
        LIST_HEAD(local_dead_tasks);
        struct task_struct *task;
        struct task_struct *ttask;

        spin_lock_irqsave(&task_mortuary, flags);

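        /* Age the lists: tasks that have already survived one full
         * sync (dead_tasks) can be freed now; freshly dying tasks
         * move onto dead_tasks to be freed on the next pass.
         */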
        list_splice_init(&dead_tasks, &local_dead_tasks);
        list_splice_init(&dying_tasks, &dead_tasks);

        spin_unlock_irqrestore(&task_mortuary, flags);

        list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
                list_del(&task->tasks);
                free_task(task);
        }
}


static void mark_done(int cpu)
{
        int i;

        cpu_set(cpu, marked_cpus);

        for_each_online_cpu(i) {
                if (!cpu_isset(i, marked_cpus))
                        return;
        }

        /* All CPUs have been processed at least once,
         * we can process the mortuary once
         */
        process_task_mortuary();

        cpus_clear(marked_cpus);
}


/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal, the code switch to sb_sample_start at first kernel enter/exit
 * switch so we need a fifth state and some special handling in sync_buffer()
 */
typedef enum {
        sb_bt_ignore = -2,
        sb_buffer_start,
        sb_bt_start,
        sb_sample_start,
} sync_buffer_state;

/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * lookup in task->mm->mmap to convert EIP into dcookie/offset
 * value.
 */
void sync_buffer(int cpu)
{
        struct mm_struct *mm = NULL;
        struct mm_struct *oldmm;
        unsigned long val;
        struct task_struct *new;
        unsigned long cookie = 0;
        int in_kernel = 1;
        sync_buffer_state state = sb_buffer_start;
        unsigned int i;
        unsigned long available;
        unsigned long flags;
        struct op_entry entry;
        struct op_sample *sample;

        mutex_lock(&buffer_mutex);

        add_cpu_switch(cpu);

        op_cpu_buffer_reset(cpu);
        available = op_cpu_buffer_entries(cpu);

        for (i = 0; i < available; ++i) {
                sample = op_cpu_buffer_read_entry(&entry, cpu);
                if (!sample)
                        break;

                if (is_code(sample->eip)) {
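                        /* escape entry: the event field carries state-change
                         * flags rather than a sample
                         */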
                        flags = sample->event;
                        if (flags & TRACE_BEGIN) {
                                state = sb_bt_start;
                                add_trace_begin();
                        }
                        if (flags & KERNEL_CTX_SWITCH) {
                                /* kernel/userspace switch */
                                in_kernel = flags & IS_KERNEL;
                                if (state == sb_buffer_start)
                                        state = sb_sample_start;
                                add_kernel_ctx_switch(flags & IS_KERNEL);
                        }
                        if (flags & USER_CTX_SWITCH
                            && op_cpu_buffer_get_data(&entry, &val)) {
                                /* userspace context switch */
                                new = (struct task_struct *)val;
                                oldmm = mm;
                                release_mm(oldmm);
                                mm = take_tasks_mm(new);
                                if (mm != oldmm)
                                        cookie = get_exec_dcookie(mm);
                                add_user_ctx_switch(new, cookie);
                        }
#ifdef CONFIG_OPROFILE_IBS
                        if (flags & IBS_FETCH_BEGIN)
                                add_ibs_begin(cpu, IBS_FETCH_CODE, mm);
                        if (flags & IBS_OP_BEGIN)
                                add_ibs_begin(cpu, IBS_OP_CODE, mm);
#endif
                        continue;
                }

                if (state < sb_bt_start)
                        /* ignore sample */
                        continue;

                if (add_sample(mm, sample, in_kernel))
                        continue;

                /* ignore backtraces if failed to add a sample */
                if (state == sb_bt_start) {
                        state = sb_bt_ignore;
                        atomic_inc(&oprofile_stats.bt_lost_no_mapping);
                }
        }
        release_mm(mm);

        mark_done(cpu);

        mutex_unlock(&buffer_mutex);
}

/* Add a buffer worth of data directly to the event buffer. The buffer
 * is assumed to be circular: take the entries from index start up to
 * (but not including) index stop, wrapping at index max.
 */
void oprofile_put_buff(unsigned long *buf, unsigned int start,
                       unsigned int stop, unsigned int max)
{
        int i;

        i = start;

        mutex_lock(&buffer_mutex);
        while (i != stop) {
                add_event_entry(buf[i++]);

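                /* wrap around the end of the circular buffer */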
                if (i >= max)
                        i = 0;
        }

        mutex_unlock(&buffer_mutex);
}