]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - kernel/trace/trace_sysprof.c
ftrace: extend sysprof plugin some more
[mirror_ubuntu-zesty-kernel.git] / kernel / trace / trace_sysprof.c
CommitLineData
f06c3810
IM
1/*
2 * trace stack traces
3 *
4 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
56a08bdc 6 * Copyright (C) 2004, 2005, Soeren Sandmann
f06c3810 7 */
f06c3810 8#include <linux/kallsyms.h>
0075fa80
IM
9#include <linux/debugfs.h>
10#include <linux/hrtimer.h>
f06c3810 11#include <linux/uaccess.h>
f06c3810 12#include <linux/ftrace.h>
0075fa80 13#include <linux/module.h>
56a08bdc 14#include <linux/irq.h>
0075fa80 15#include <linux/fs.h>
f06c3810
IM
16
17#include "trace.h"
18
/* The trace_array sysprof records samples into; set by stack_trace_init(). */
static struct trace_array	*sysprof_trace;
/* Non-zero while the sysprof tracer is actively sampling. */
static int __read_mostly	tracer_enabled;

/*
 * 1 msec sample interval for now (value is in nanoseconds;
 * the old comment said "10 msecs", which did not match):
 */
static const unsigned long sample_period = 1000000;

/*
 * Per CPU hrtimers that do the profiling:
 */
static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);
31
56a08bdc
IM
/*
 * Layout of one saved user-space stack frame as walked by
 * timer_notify(): pointer to the caller's frame, then the
 * return address (x86 frame-pointer convention).
 */
struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};
36
37static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
38{
39 if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
40 return 0;
41
42 if (__copy_from_user_inatomic(frame, frame_pointer, sizeof(*frame)))
43 return 0;
44
45 return 1;
46}
47
/* Upper bound on user-stack frames recorded for one sample */
#define SYSPROF_MAX_ADDRESSES 512

/*
 * Per-sample hook, run from the per-CPU sampling hrtimer in hard-irq
 * context: record one kernel-PC or user-backtrace sample for @cpu
 * into the sysprof trace buffers.
 */
static void timer_notify(struct pt_regs *regs, int cpu)
{
	const void __user *frame_pointer;
	struct trace_array_cpu *data;
	struct stack_frame frame;
	struct trace_array *tr;
	int is_user;
	int i;

	/* No register snapshot available for this tick */
	if (!regs)
		return;

	tr = sysprof_trace;
	data = tr->data[cpu];
	is_user = user_mode(regs);

	/* Skip when there is no task, or it is the idle task (pid 0) */
	if (!current || current->pid == 0)
		return;

	/* Only sample user stacks of tasks that are actually runnable */
	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user) {
		/* kernel-mode sample: record one entry and stop */
		ftrace(tr, data, current->pid, 1, 0);
		return;

	}

	/* type 0 record: user-mode instruction pointer */
	trace_special(tr, data, 0, current->pid, regs->ip);

	frame_pointer = (void __user *)regs->bp;

	/*
	 * Walk the user stack via saved frame pointers. Stop when a
	 * frame cannot be read, or the chain points below the current
	 * stack pointer (left the stack), or the frame cap is hit.
	 */
	for (i = 0; i < SYSPROF_MAX_ADDRESSES; i++) {
		if (!copy_stack_frame(frame_pointer, &frame))
			break;
		if ((unsigned long)frame_pointer < regs->sp)
			break;

		/* type 1 record: one call-chain entry (return addr, fp) */
		trace_special(tr, data, 1, frame.return_address,
			(unsigned long)frame_pointer);
		frame_pointer = frame.next_fp;
	}

	/* type 2 record: number of frames captured for this sample */
	trace_special(tr, data, 2, current->pid, i);

	/* Sentinel marking a backtrace truncated at the frame cap */
	if (i == SYSPROF_MAX_ADDRESSES)
		trace_special(tr, data, -1, -1, -1);
}
99
0075fa80
IM
/*
 * hrtimer callback: take one profiling sample on the local CPU,
 * then re-arm the timer one sample_period into the future.
 */
static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
{
	/* trace here */
	timer_notify(get_irq_regs(), smp_processor_id());

	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	return HRTIMER_RESTART;
}
109
/*
 * Initialize and arm the profiling hrtimer for @cpu.  The caller
 * (start_stack_timers()) migrates itself to @cpu first, so the
 * relative timer is started on the CPU it will fire on.
 */
static void start_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = stack_trace_timer_fn;
	/* run the callback straight from hard-irq context */
	hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;

	hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
}
120
121static void start_stack_timers(void)
122{
123 cpumask_t saved_mask = current->cpus_allowed;
124 int cpu;
125
126 for_each_online_cpu(cpu) {
127 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
128 start_stack_timer(cpu);
129 printk("started timer on cpu%d\n", cpu);
130 }
131 set_cpus_allowed_ptr(current, &saved_mask);
132}
133
134static void stop_stack_timer(int cpu)
135{
136 struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
137
138 hrtimer_cancel(hrtimer);
139 printk("cancelled timer on cpu%d\n", cpu);
140}
141
142static void stop_stack_timers(void)
143{
144 int cpu;
145
146 for_each_online_cpu(cpu)
147 stop_stack_timer(cpu);
148}
149
f06c3810
IM
/*
 * Clear the per-CPU trace buffers and restart the trace clock so a
 * new sysprof run begins from a clean state.
 */
static notrace void stack_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}
159
/*
 * Begin a sysprof run: reset old trace data, arm the per-CPU
 * sampling timers, then mark the tracer enabled.
 *
 * NOTE(review): the timers start firing before tracer_enabled is
 * set; timer_notify() does not appear to check the flag, so this
 * looks harmless — confirm.
 */
static notrace void start_stack_trace(struct trace_array *tr)
{
	stack_reset(tr);
	start_stack_timers();
	tracer_enabled = 1;
}
166
/* End a sysprof run: cancel the sampling timers, then clear the flag. */
static notrace void stop_stack_trace(struct trace_array *tr)
{
	stop_stack_timers();
	tracer_enabled = 0;
}
172
/*
 * Tracer-core init callback: remember which trace_array we record
 * into and, if tracing is already switched on (tr->ctrl), start
 * sampling immediately.
 */
static notrace void stack_trace_init(struct trace_array *tr)
{
	sysprof_trace = tr;

	if (tr->ctrl)
		start_stack_trace(tr);
}
180
/* Tracer-core reset callback: stop sampling if it is currently active. */
static notrace void stack_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_stack_trace(tr);
}
186
/* Tracer-core callback: react to the tracing on/off control switch. */
static void stack_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_stack_trace(tr);
	else
		stop_stack_trace(tr);
}
195
/* Registration record hooking sysprof into the tracing core. */
static struct tracer stack_trace __read_mostly =
{
	.name		= "sysprof",
	.init		= stack_trace_init,
	.reset		= stack_trace_reset,
	.ctrl_update	= stack_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_stack,
#endif
};
206
/* Register the sysprof tracer with the tracing core at boot time. */
__init static int init_stack_trace(void)
{
	return register_tracer(&stack_trace);
}
device_initcall(init_stack_trace);