/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */

#include <linux/module.h>
#include "trace.h"

/*
 * alloc_percpu() takes a type rather than a size, so create a dummy
 * type that matches the desired per-cpu buffer size.
 */
typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t;

char *trace_profile_buf;
EXPORT_SYMBOL_GPL(trace_profile_buf);

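/*
 * Events that fire in NMI context write into this separate buffer, so
 * they can't corrupt a record being assembled in trace_profile_buf at
 * the moment the NMI hits.
 */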
char *trace_profile_buf_nmi;
EXPORT_SYMBOL_GPL(trace_profile_buf_nmi);

/* Count the events in use (per event id, not per instance) */
static int total_profile_count;

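/*
 * Enable profiling for one event type. The per-cpu buffers are shared
 * by every profiled event and are allocated lazily when the first one
 * is enabled.
 */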
static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
	char *buf;
	int ret = -ENOMEM;

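	/*
	 * profile_count starts at -1, so the increment returns 0 only for
	 * the first enable of this event; later enables just take another
	 * reference.
	 */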
	if (atomic_inc_return(&event->profile_count))
		return 0;

	if (!total_profile_count++) {
		buf = (char *)alloc_percpu(profile_buf_t);
		if (!buf)
			goto fail_buf;

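		/*
		 * Publish the buffer with rcu_assign_pointer() so event
		 * handlers that dereference it see fully initialised memory.
		 */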
		rcu_assign_pointer(trace_profile_buf, buf);

		buf = (char *)alloc_percpu(profile_buf_t);
		if (!buf)
			goto fail_buf_nmi;

		rcu_assign_pointer(trace_profile_buf_nmi, buf);
	}

	ret = event->profile_enable();
	if (!ret)
		return 0;

	/* The buffers come from alloc_percpu(), so free them with free_percpu() */
	free_percpu(trace_profile_buf_nmi);
fail_buf_nmi:
	free_percpu(trace_profile_buf);
fail_buf:
	total_profile_count--;
	atomic_dec(&event->profile_count);

	return ret;
}

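/*
 * Called with the id of the event to profile (e.g. when a perf
 * tracepoint event is created). Looks the event up in ftrace_events
 * and pins its module while profiling is active.
 */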
int ftrace_profile_enable(int event_id)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id && event->profile_enable &&
		    try_module_get(event->mod)) {
			ret = ftrace_profile_enable_event(event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

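/*
 * Disable profiling for one event type and release the shared buffers
 * once no profiled event is left.
 */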
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
	char *buf, *nmi_buf;

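	/*
	 * The count drops back to -1 only when the last reference to this
	 * event goes away; intermediate disables just decrement it.
	 */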
	if (!atomic_add_negative(-1, &event->profile_count))
		return;

	event->profile_disable();

	if (!--total_profile_count) {
		buf = trace_profile_buf;
		rcu_assign_pointer(trace_profile_buf, NULL);

		nmi_buf = trace_profile_buf_nmi;
		rcu_assign_pointer(trace_profile_buf_nmi, NULL);

		/*
		 * Ensure all events still profiling have finished before
		 * releasing the buffers
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}

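/*
 * Counterpart of ftrace_profile_enable(): look the event up by id,
 * disable profiling for it and drop the module reference taken at
 * enable time.
 */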
void ftrace_profile_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			ftrace_profile_disable_event(event);
			module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}