/**
 * @file buffer_sync.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf
 * @author Robert Richter <robert.richter@amd.com>
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */
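
/* A rough sketch of what a synced stream looks like in the event buffer
 * (illustrative only; the exact codes are defined in event_buffer.h):
 *
 *	ESCAPE_CODE, CTX_SWITCH_CODE, pid, app-cookie, ...
 *	ESCAPE_CODE, COOKIE_SWITCH_CODE, image-cookie
 *	offset, event
 *	offset, event
 *	...
 *
 * Each (offset, event) pair is a sample attributed to the most recently
 * emitted cookie.
 */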
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/gfp.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"

static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_var_t marked_cpus;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);
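
/* Task structs handed to us by the task-free notifier are parked on
 * dying_tasks, promoted to dead_tasks after one full sync of every CPU,
 * and finally freed after the next one; marked_cpus records which CPUs
 * have been synced since the mortuary was last processed. */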

/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long flags;
	struct task_struct *task = data;

	spin_lock_irqsave(&task_mortuary, flags);
	list_add(&task->tasks, &dying_tasks);
	spin_unlock_irqrestore(&task_mortuary, flags);

	return NOTIFY_OK;
}

/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int
task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
{
	/* To avoid latency problems, we only process the current CPU,
	 * hoping that most samples for the task are on this CPU.
	 */
	sync_buffer(raw_smp_processor_id());
	return 0;
}

/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact, it's a QoI issue
 * only.
 */
static int
munmap_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long addr = (unsigned long)data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *mpnt;

	down_read(&mm->mmap_sem);

	mpnt = find_vma(mm, addr);
	if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
		up_read(&mm->mmap_sem);
		/* To avoid latency problems, we only process the current CPU,
		 * hoping that most samples for the task are on this CPU.
		 */
		sync_buffer(raw_smp_processor_id());
		return 0;
	}

	up_read(&mm->mmap_sem);
	return 0;
}

/* We need to be told about new modules so we don't attribute samples to a
 * previously loaded module, or drop them on the floor.
 */
static int
module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
	if (val != MODULE_STATE_COMING)
		return 0;

	/* FIXME: should we process all CPU buffers ? */
	mutex_lock(&buffer_mutex);
	add_event_entry(ESCAPE_CODE);
	add_event_entry(MODULE_LOADED_CODE);
	mutex_unlock(&buffer_mutex);
#endif
	return 0;
}

static struct notifier_block task_free_nb = {
	.notifier_call = task_free_notify,
};

static struct notifier_block task_exit_nb = {
	.notifier_call = task_exit_notify,
};

static struct notifier_block munmap_nb = {
	.notifier_call = munmap_notify,
};

static struct notifier_block module_load_nb = {
	.notifier_call = module_load_notify,
};

static void free_all_tasks(void)
{
	/* make sure we don't leak task structs */
	process_task_mortuary();
	process_task_mortuary();
}
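
/* process_task_mortuary() is called twice on purpose: the first call frees
 * whatever is already on dead_tasks and moves dying_tasks over to it, the
 * second call then frees those freshly moved entries as well, so nothing
 * is left behind when we tear down. */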

int sync_start(void)
{
	int err;

	if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
		return -ENOMEM;

	err = task_handoff_register(&task_free_nb);
	if (err)
		goto out1;
	err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
	if (err)
		goto out2;
	err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
	if (err)
		goto out3;
	err = register_module_notifier(&module_load_nb);
	if (err)
		goto out4;

out:
	return err;
out4:
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
	task_handoff_unregister(&task_free_nb);
	free_all_tasks();
out1:
	free_cpumask_var(marked_cpus);
	goto out;
}

void sync_stop(void)
{
	unregister_module_notifier(&module_load_nb);
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
	task_handoff_unregister(&task_free_nb);
	barrier();			/* do all of the above first */

	free_all_tasks();
	free_cpumask_var(marked_cpus);
}

/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
static inline unsigned long fast_get_dcookie(struct path *path)
{
	unsigned long cookie;

	if (path->dentry->d_flags & DCACHE_COOKIE)
		return (unsigned long)path->dentry;
	get_dcookie(path, &cookie);
	return cookie;
}

/* Look up the dcookie for the task's mm->exe_file,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications.
 */
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
	unsigned long cookie = NO_COOKIE;
	struct file *exe_file;

	if (!mm)
		goto done;

	exe_file = get_mm_exe_file(mm);
	if (!exe_file)
		goto done;

	cookie = fast_get_dcookie(&exe_file->f_path);
	fput(exe_file);
done:
	return cookie;
}

/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 *
 * The caller must ensure the mm is not nil (ie: not a kernel thread).
 */
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

		if (addr < vma->vm_start || addr >= vma->vm_end)
			continue;

		if (vma->vm_file) {
			cookie = fast_get_dcookie(&vma->vm_file->f_path);
			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
				vma->vm_start;
		} else {
			/* must be an anonymous map */
			*offset = addr;
		}

		break;
	}

	if (!vma)
		cookie = INVALID_COOKIE;
	up_read(&mm->mmap_sem);

	return cookie;
}
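
/* Note the two failure flavours: NO_COOKIE means the address fell in an
 * anonymous mapping (the raw address is used as the offset), while
 * INVALID_COOKIE means no VMA covered the address at all, in which case
 * callers count the sample as lost. */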

static unsigned long last_cookie = INVALID_COOKIE;

static void add_cpu_switch(int i)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CPU_SWITCH_CODE);
	add_event_entry(i);
	last_cookie = INVALID_COOKIE;
}

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
	add_event_entry(ESCAPE_CODE);
	if (in_kernel)
		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
	else
		add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

static void
add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_SWITCH_CODE);
	add_event_entry(task->pid);
	add_event_entry(cookie);
	/* Another code for daemon back-compat */
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_TGID_CODE);
	add_event_entry(task->tgid);
}

static void add_cookie_switch(unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(COOKIE_SWITCH_CODE);
	add_event_entry(cookie);
}

static void add_trace_begin(void)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(TRACE_BEGIN_CODE);
}

/* Emit an extended record: an escape, the record's code, the translated
 * offset of its PC, then any remaining payload words verbatim. */
static void add_data(struct op_entry *entry, struct mm_struct *mm)
{
	unsigned long code, pc, val;
	unsigned long cookie;
	off_t offset;

	if (!op_cpu_buffer_get_data(entry, &code))
		return;
	if (!op_cpu_buffer_get_data(entry, &pc))
		return;
	if (!op_cpu_buffer_get_size(entry))
		return;

	if (mm) {
		cookie = lookup_dcookie(mm, pc, &offset);

		if (cookie == NO_COOKIE)
			offset = pc;
		if (cookie == INVALID_COOKIE) {
			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
			offset = pc;
		}
		if (cookie != last_cookie) {
			add_cookie_switch(cookie);
			last_cookie = cookie;
		}
	} else
		offset = pc;

	add_event_entry(ESCAPE_CODE);
	add_event_entry(code);
	add_event_entry(offset);	/* Offset from Dcookie */

	while (op_cpu_buffer_get_data(entry, &val))
		add_event_entry(val);
}

static inline void add_sample_entry(unsigned long offset, unsigned long event)
{
	add_event_entry(offset);
	add_event_entry(event);
}

/*
 * Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace. Return 0 on failure.
 */
static int
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
{
	unsigned long cookie;
	off_t offset;

	if (in_kernel) {
		add_sample_entry(s->eip, s->event);
		return 1;
	}

	/* add userspace sample */

	if (!mm) {
		atomic_inc(&oprofile_stats.sample_lost_no_mm);
		return 0;
	}

	cookie = lookup_dcookie(mm, s->eip, &offset);

	if (cookie == INVALID_COOKIE) {
		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
		return 0;
	}

	if (cookie != last_cookie) {
		add_cookie_switch(cookie);
		last_cookie = cookie;
	}

	add_sample_entry(offset, s->event);

	return 1;
}
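
/* add_sample()'s nonzero/zero return matters in sync_buffer(): when a sample
 * cannot be attributed, any backtrace entries that follow it are discarded
 * rather than being charged to the wrong image. */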

static void release_mm(struct mm_struct *mm)
{
	if (!mm)
		return;
	mmput(mm);
}

static inline int is_code(unsigned long val)
{
	return val == ESCAPE_CODE;
}

/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
	unsigned long flags;
	LIST_HEAD(local_dead_tasks);
	struct task_struct *task;
	struct task_struct *ttask;

	spin_lock_irqsave(&task_mortuary, flags);

	list_splice_init(&dead_tasks, &local_dead_tasks);
	list_splice_init(&dying_tasks, &dead_tasks);

	spin_unlock_irqrestore(&task_mortuary, flags);

	list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
		list_del(&task->tasks);
		free_task(task);
	}
}

static void mark_done(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, marked_cpus);

	for_each_online_cpu(i) {
		if (!cpumask_test_cpu(i, marked_cpus))
			return;
	}

	/* All CPUs have been processed at least once,
	 * we can process the mortuary once
	 */
	process_task_mortuary();

	cpumask_clear(marked_cpus);
}

/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal, the code switch to sb_sample_start at first kernel enter/exit
 * switch so we need a fifth state and some special handling in sync_buffer()
 */
typedef enum {
	sb_bt_ignore = -2,
	sb_buffer_start,
	sb_bt_start,
	sb_sample_start,
} sync_buffer_state;
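
/* Rough meaning of the states: sb_buffer_start until the first context
 * switch note is seen (samples are skipped), sb_sample_start once normal
 * samples may be attributed, sb_bt_start while backtrace entries are
 * expected, and sb_bt_ignore to drop a backtrace whose leading sample
 * could not be added. */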

/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * lookup in task->mm->mmap to convert EIP into dcookie/offset
 * value.
 */
void sync_buffer(int cpu)
{
	struct mm_struct *mm = NULL;
	struct mm_struct *oldmm;
	unsigned long val;
	struct task_struct *new;
	unsigned long cookie = 0;
	int in_kernel = 1;
	sync_buffer_state state = sb_buffer_start;
	unsigned int i;
	unsigned long available;
	unsigned long flags;
	struct op_entry entry;
	struct op_sample *sample;

	mutex_lock(&buffer_mutex);

	add_cpu_switch(cpu);

	op_cpu_buffer_reset(cpu);
	available = op_cpu_buffer_entries(cpu);

	for (i = 0; i < available; ++i) {
		sample = op_cpu_buffer_read_entry(&entry, cpu);
		if (!sample)
			break;

		if (is_code(sample->eip)) {
			flags = sample->event;
			if (flags & TRACE_BEGIN) {
				state = sb_bt_start;
				add_trace_begin();
			}
			if (flags & KERNEL_CTX_SWITCH) {
				/* kernel/userspace switch */
				in_kernel = flags & IS_KERNEL;
				if (state == sb_buffer_start)
					state = sb_sample_start;
				add_kernel_ctx_switch(flags & IS_KERNEL);
			}
			if (flags & USER_CTX_SWITCH
			    && op_cpu_buffer_get_data(&entry, &val)) {
				/* userspace context switch */
				new = (struct task_struct *)val;
				oldmm = mm;
				release_mm(oldmm);
				mm = get_task_mm(new);
				if (mm != oldmm)
					cookie = get_exec_dcookie(mm);
				add_user_ctx_switch(new, cookie);
			}
			if (op_cpu_buffer_get_size(&entry))
				add_data(&entry, mm);
			continue;
		}

		if (state < sb_bt_start)
			/* ignore sample */
			continue;

		if (add_sample(mm, sample, in_kernel))
			continue;

		/* ignore backtraces if failed to add a sample */
		if (state == sb_bt_start) {
			state = sb_bt_ignore;
			atomic_inc(&oprofile_stats.bt_lost_no_mapping);
		}
	}
	release_mm(mm);

	mark_done(cpu);

	mutex_unlock(&buffer_mutex);
}
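
/* sync_buffer() runs with buffer_mutex held and is reached from the task
 * exit and munmap notifiers above, and (in the stock driver) from the
 * periodic per-CPU work in cpu_buffer.c that drains each CPU buffer. */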

/* The function can be used to add a buffer worth of data directly to
 * the kernel buffer. The buffer is assumed to be a circular buffer.
 * Take the entries from index start and end at index stop, wrapping
 * at max.
 */
void oprofile_put_buff(unsigned long *buf, unsigned int start,
		       unsigned int stop, unsigned int max)
{
	int i;

	i = start;

	mutex_lock(&buffer_mutex);
	while (i != stop) {
		add_event_entry(buf[i++]);

		if (i >= max)
			i = 0;
	}

	mutex_unlock(&buffer_mutex);
}
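
/* Illustrative only: a minimal sketch of how a driver that collects samples
 * into its own circular buffer might hand them over. The names hw_buf, head,
 * tail and HW_BUF_ENTRIES are hypothetical, not part of this file or the
 * oprofile API:
 *
 *	static unsigned long hw_buf[HW_BUF_ENTRIES];
 *
 *	// flush everything between tail (oldest) and head (newest)
 *	oprofile_put_buff(hw_buf, tail, head, HW_BUF_ENTRIES);
 *
 * The entries are copied verbatim, so the caller is responsible for having
 * formatted them as the event-buffer escape sequences the daemon expects.
 */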