kernel/trace/trace_syscalls.c (mirror_ubuntu-jammy-kernel.git), as of commit "tracing: Add perf counter support for syscalls tracing"
#include <trace/syscall.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_counter.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, FTRACE_SYSCALL_MAX);
static DECLARE_BITMAP(enabled_exit_syscalls, FTRACE_SYSCALL_MAX);

/* Option to display the parameters types */
enum {
	TRACE_SYSCALLS_OPT_TYPES = 0x1,
};

static struct tracer_opt syscalls_opts[] = {
	{ TRACER_OPT(syscall_arg_type, TRACE_SYSCALLS_OPT_TYPES) },
	{ }
};

static struct tracer_flags syscalls_flags = {
	.val = 0, /* By default: no parameters types */
	.opts = syscalls_opts
};
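
/*
 * Output callback for syscall entry events: look up the syscall's
 * metadata, check that the event type matches, and print
 * "name(arg: value, ...)", optionally prefixing each value with its
 * type when the syscall_arg_type tracer option is set.
 */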
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_id != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (syscalls_flags.val & TRACE_SYSCALLS_OPT_TYPES) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s ", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? ")" : ",");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

end:
	trace_seq_printf(s, "\n");
	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_id != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
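
/*
 * Tracepoint probe hit on syscall entry: record the syscall number and
 * arguments into the ring buffer if tracing is enabled for this syscall.
 */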
void ftrace_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(sys_data->enter_id, size,
						  0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	trace_current_buffer_unlock_commit(event, 0, 0);
	trace_wake_up();
}

void ftrace_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(sys_data->exit_id,
						  sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	trace_current_buffer_unlock_commit(event, 0, 0);
	trace_wake_up();
}
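
/*
 * Enable ftrace recording of one syscall's entry events: register the
 * entry tracepoint probe on first use and set the per-syscall enable
 * bit, all under syscall_trace_lock.
 */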
int reg_event_syscall_enter(void *ptr)
{
	int ret = 0;
	int num;
	char *name;

	name = (char *)ptr;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= FTRACE_SYSCALL_MAX)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_syscall_enter(ftrace_syscall_enter);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point");
	} else {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_enter(void *ptr)
{
	int num;
	char *name;

	name = (char *)ptr;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= FTRACE_SYSCALL_MAX)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_syscall_enter(ftrace_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(void *ptr)
{
	int ret = 0;
	int num;
	char *name;

	name = (char *)ptr;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= FTRACE_SYSCALL_MAX)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_syscall_exit(ftrace_syscall_exit);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point");
	} else {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_exit(void *ptr)
{
	int num;
	char *name;

	name = (char *)ptr;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= FTRACE_SYSCALL_MAX)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_syscall_exit(ftrace_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

struct trace_event event_syscall_enter = {
	.trace = print_syscall_enter,
};

struct trace_event event_syscall_exit = {
	.trace = print_syscall_exit,
};
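
/*
 * Perf counter (profile) support: the probes below feed syscall entry
 * and exit events to perf via perf_tpcounter_event() rather than into
 * the ftrace ring buffer.
 */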
#ifdef CONFIG_EVENT_PROFILE
static DECLARE_BITMAP(enabled_prof_enter_syscalls, FTRACE_SYSCALL_MAX);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, FTRACE_SYSCALL_MAX);
static int sys_prof_refcount_enter;
static int sys_prof_refcount_exit;
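
/* Count a syscall entry event for perf if profiling is enabled for it. */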
static void prof_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	perf_tpcounter_event(sys_data->enter_id, 0, 1, NULL, 0);
}

int reg_prof_syscall_enter(char *name)
{
	int ret = 0;
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= FTRACE_SYSCALL_MAX)
		return -ENOSYS;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_enter)
		ret = register_trace_syscall_enter(prof_syscall_enter);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point");
	} else {
		set_bit(num, enabled_prof_enter_syscalls);
		sys_prof_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_prof_syscall_enter(char *name)
{
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= FTRACE_SYSCALL_MAX)
		return;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_enter--;
	clear_bit(num, enabled_prof_enter_syscalls);
	if (!sys_prof_refcount_enter)
		unregister_trace_syscall_enter(prof_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

/* Count a syscall exit event for perf if profiling is enabled for it. */
static void prof_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	perf_tpcounter_event(sys_data->exit_id, 0, 1, NULL, 0);
}

int reg_prof_syscall_exit(char *name)
{
	int ret = 0;
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= FTRACE_SYSCALL_MAX)
		return -ENOSYS;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_exit)
		ret = register_trace_syscall_exit(prof_syscall_exit);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point");
	} else {
		set_bit(num, enabled_prof_exit_syscalls);
		sys_prof_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_prof_syscall_exit(char *name)
{
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= FTRACE_SYSCALL_MAX)
		return;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_exit--;
	clear_bit(num, enabled_prof_exit_syscalls);
	if (!sys_prof_refcount_exit)
		unregister_trace_syscall_exit(prof_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

#endif