]> git.proxmox.com Git - mirror_ubuntu-kernels.git/blob - kernel/trace/trace_syscalls.c
tracing: Support for syscall events raw records in perfcounters
[mirror_ubuntu-kernels.git] / kernel / trace / trace_syscalls.c
1 #include <trace/syscall.h>
2 #include <linux/kernel.h>
3 #include <linux/ftrace.h>
4 #include <linux/perf_counter.h>
5 #include <asm/syscall.h>
6
7 #include "trace_output.h"
8 #include "trace.h"
9
/* Serializes syscall tracepoint (un)registration and the refcounts below. */
static DEFINE_MUTEX(syscall_trace_lock);
/* Number of currently enabled enter/exit syscall events (guarded by the mutex). */
static int sys_refcount_enter;
static int sys_refcount_exit;
/* One bit per syscall nr: set when tracing of that syscall is enabled. */
static DECLARE_BITMAP(enabled_enter_syscalls, FTRACE_SYSCALL_MAX);
static DECLARE_BITMAP(enabled_exit_syscalls, FTRACE_SYSCALL_MAX);
15
/* Option to display the parameters types */
enum {
	TRACE_SYSCALLS_OPT_TYPES = 0x1,
};

/* Tracer options exposed under trace_options (e.g. "syscall_arg_type"). */
static struct tracer_opt syscalls_opts[] = {
	{ TRACER_OPT(syscall_arg_type, TRACE_SYSCALLS_OPT_TYPES) },
	{ }
};

static struct tracer_flags syscalls_flags = {
	.val = 0, /* By default: no parameters types */
	.opts = syscalls_opts
};
30
31 enum print_line_t
32 print_syscall_enter(struct trace_iterator *iter, int flags)
33 {
34 struct trace_seq *s = &iter->seq;
35 struct trace_entry *ent = iter->ent;
36 struct syscall_trace_enter *trace;
37 struct syscall_metadata *entry;
38 int i, ret, syscall;
39
40 trace = (typeof(trace))ent;
41 syscall = trace->nr;
42 entry = syscall_nr_to_meta(syscall);
43
44 if (!entry)
45 goto end;
46
47 if (entry->enter_id != ent->type) {
48 WARN_ON_ONCE(1);
49 goto end;
50 }
51
52 ret = trace_seq_printf(s, "%s(", entry->name);
53 if (!ret)
54 return TRACE_TYPE_PARTIAL_LINE;
55
56 for (i = 0; i < entry->nb_args; i++) {
57 /* parameter types */
58 if (syscalls_flags.val & TRACE_SYSCALLS_OPT_TYPES) {
59 ret = trace_seq_printf(s, "%s ", entry->types[i]);
60 if (!ret)
61 return TRACE_TYPE_PARTIAL_LINE;
62 }
63 /* parameter values */
64 ret = trace_seq_printf(s, "%s: %lx%s ", entry->args[i],
65 trace->args[i],
66 i == entry->nb_args - 1 ? ")" : ",");
67 if (!ret)
68 return TRACE_TYPE_PARTIAL_LINE;
69 }
70
71 end:
72 trace_seq_printf(s, "\n");
73 return TRACE_TYPE_HANDLED;
74 }
75
76 enum print_line_t
77 print_syscall_exit(struct trace_iterator *iter, int flags)
78 {
79 struct trace_seq *s = &iter->seq;
80 struct trace_entry *ent = iter->ent;
81 struct syscall_trace_exit *trace;
82 int syscall;
83 struct syscall_metadata *entry;
84 int ret;
85
86 trace = (typeof(trace))ent;
87 syscall = trace->nr;
88 entry = syscall_nr_to_meta(syscall);
89
90 if (!entry) {
91 trace_seq_printf(s, "\n");
92 return TRACE_TYPE_HANDLED;
93 }
94
95 if (entry->exit_id != ent->type) {
96 WARN_ON_ONCE(1);
97 return TRACE_TYPE_UNHANDLED;
98 }
99
100 ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
101 trace->ret);
102 if (!ret)
103 return TRACE_TYPE_PARTIAL_LINE;
104
105 return TRACE_TYPE_HANDLED;
106 }
107
/*
 * Emit the event "format" description for a syscall event into @s: one
 * "field:" line per argument followed by a print-fmt template, as read
 * from the events/.../format files.
 *
 * Returns 0 if the trace_seq overflowed or the syscall has no metadata;
 * otherwise returns the result of the last trace_seq_printf().
 */
int ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s)
{
	int i;
	int nr;
	int ret = 0;
	struct syscall_metadata *entry;
	/* Arguments are laid out right after the common trace_entry header. */
	int offset = sizeof(struct trace_entry);

	/* call->data carries the syscall name this event was created for. */
	nr = syscall_name_to_nr((char *)call->data);
	entry = syscall_nr_to_meta(nr);

	if (!entry)
		return ret;

	/* One field descriptor per argument; each is stored as unsigned long. */
	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "\tfield:%s %s;", entry->types[i],
				       entry->args[i]);
		if (!ret)
			return 0;
		ret = trace_seq_printf(s, "\toffset:%d;\tsize:%lu;\n", offset,
				       sizeof(unsigned long));
		if (!ret)
			return 0;
		offset += sizeof(unsigned long);
	}

	/* printf template: "<arg>: 0x%0<width>lx, ..." (width = sizeof(long)). */
	trace_seq_printf(s, "\nprint fmt: \"");
	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "%s: 0x%%0%lulx%s", entry->args[i],
				       sizeof(unsigned long),
				       i == entry->nb_args - 1 ? "\", " : ", ");
		if (!ret)
			return 0;
	}

	/* The REC-> argument list that feeds the template above. */
	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "((unsigned long)(REC->%s))%s",
				       entry->args[i],
				       i == entry->nb_args - 1 ? "\n" : ", ");
		if (!ret)
			return 0;
	}

	return ret;
}
153
154 void ftrace_syscall_enter(struct pt_regs *regs, long id)
155 {
156 struct syscall_trace_enter *entry;
157 struct syscall_metadata *sys_data;
158 struct ring_buffer_event *event;
159 int size;
160 int syscall_nr;
161
162 syscall_nr = syscall_get_nr(current, regs);
163 if (!test_bit(syscall_nr, enabled_enter_syscalls))
164 return;
165
166 sys_data = syscall_nr_to_meta(syscall_nr);
167 if (!sys_data)
168 return;
169
170 size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
171
172 event = trace_current_buffer_lock_reserve(sys_data->enter_id, size,
173 0, 0);
174 if (!event)
175 return;
176
177 entry = ring_buffer_event_data(event);
178 entry->nr = syscall_nr;
179 syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
180
181 trace_current_buffer_unlock_commit(event, 0, 0);
182 trace_wake_up();
183 }
184
185 void ftrace_syscall_exit(struct pt_regs *regs, long ret)
186 {
187 struct syscall_trace_exit *entry;
188 struct syscall_metadata *sys_data;
189 struct ring_buffer_event *event;
190 int syscall_nr;
191
192 syscall_nr = syscall_get_nr(current, regs);
193 if (!test_bit(syscall_nr, enabled_exit_syscalls))
194 return;
195
196 sys_data = syscall_nr_to_meta(syscall_nr);
197 if (!sys_data)
198 return;
199
200 event = trace_current_buffer_lock_reserve(sys_data->exit_id,
201 sizeof(*entry), 0, 0);
202 if (!event)
203 return;
204
205 entry = ring_buffer_event_data(event);
206 entry->nr = syscall_nr;
207 entry->ret = syscall_get_return_value(current, regs);
208
209 trace_current_buffer_unlock_commit(event, 0, 0);
210 trace_wake_up();
211 }
212
213 int reg_event_syscall_enter(void *ptr)
214 {
215 int ret = 0;
216 int num;
217 char *name;
218
219 name = (char *)ptr;
220 num = syscall_name_to_nr(name);
221 if (num < 0 || num >= FTRACE_SYSCALL_MAX)
222 return -ENOSYS;
223 mutex_lock(&syscall_trace_lock);
224 if (!sys_refcount_enter)
225 ret = register_trace_syscall_enter(ftrace_syscall_enter);
226 if (ret) {
227 pr_info("event trace: Could not activate"
228 "syscall entry trace point");
229 } else {
230 set_bit(num, enabled_enter_syscalls);
231 sys_refcount_enter++;
232 }
233 mutex_unlock(&syscall_trace_lock);
234 return ret;
235 }
236
237 void unreg_event_syscall_enter(void *ptr)
238 {
239 int num;
240 char *name;
241
242 name = (char *)ptr;
243 num = syscall_name_to_nr(name);
244 if (num < 0 || num >= FTRACE_SYSCALL_MAX)
245 return;
246 mutex_lock(&syscall_trace_lock);
247 sys_refcount_enter--;
248 clear_bit(num, enabled_enter_syscalls);
249 if (!sys_refcount_enter)
250 unregister_trace_syscall_enter(ftrace_syscall_enter);
251 mutex_unlock(&syscall_trace_lock);
252 }
253
254 int reg_event_syscall_exit(void *ptr)
255 {
256 int ret = 0;
257 int num;
258 char *name;
259
260 name = (char *)ptr;
261 num = syscall_name_to_nr(name);
262 if (num < 0 || num >= FTRACE_SYSCALL_MAX)
263 return -ENOSYS;
264 mutex_lock(&syscall_trace_lock);
265 if (!sys_refcount_exit)
266 ret = register_trace_syscall_exit(ftrace_syscall_exit);
267 if (ret) {
268 pr_info("event trace: Could not activate"
269 "syscall exit trace point");
270 } else {
271 set_bit(num, enabled_exit_syscalls);
272 sys_refcount_exit++;
273 }
274 mutex_unlock(&syscall_trace_lock);
275 return ret;
276 }
277
278 void unreg_event_syscall_exit(void *ptr)
279 {
280 int num;
281 char *name;
282
283 name = (char *)ptr;
284 num = syscall_name_to_nr(name);
285 if (num < 0 || num >= FTRACE_SYSCALL_MAX)
286 return;
287 mutex_lock(&syscall_trace_lock);
288 sys_refcount_exit--;
289 clear_bit(num, enabled_exit_syscalls);
290 if (!sys_refcount_exit)
291 unregister_trace_syscall_exit(ftrace_syscall_exit);
292 mutex_unlock(&syscall_trace_lock);
293 }
294
/* Output handlers wired to the enter/exit event ids at registration time. */
struct trace_event event_syscall_enter = {
	.trace			= print_syscall_enter,
};

struct trace_event event_syscall_exit = {
	.trace			= print_syscall_exit,
};
302
303 #ifdef CONFIG_EVENT_PROFILE
304
/* Raw record handed to perf for a syscall entry: header + raw arguments. */
struct syscall_enter_record {
	struct trace_entry	entry;
	unsigned long		args[0];	/* nb_args unsigned longs follow */
};

/* Raw record handed to perf for a syscall exit: header + return value. */
struct syscall_exit_record {
	struct trace_entry	entry;
	unsigned long		ret;
};
314
/* Per-syscall enable bits and refcounts for the perf (profile) probes. */
static DECLARE_BITMAP(enabled_prof_enter_syscalls, FTRACE_SYSCALL_MAX);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, FTRACE_SYSCALL_MAX);
static int sys_prof_refcount_enter;
static int sys_prof_refcount_exit;
319
320 static void prof_syscall_enter(struct pt_regs *regs, long id)
321 {
322 struct syscall_enter_record *rec;
323 struct syscall_metadata *sys_data;
324 int syscall_nr;
325 int size;
326
327 syscall_nr = syscall_get_nr(current, regs);
328 if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
329 return;
330
331 sys_data = syscall_nr_to_meta(syscall_nr);
332 if (!sys_data)
333 return;
334
335 /* get the size after alignment with the u32 buffer size field */
336 size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
337 size = ALIGN(size + sizeof(u32), sizeof(u64));
338 size -= sizeof(u32);
339
340 do {
341 char raw_data[size];
342
343 /* zero the dead bytes from align to not leak stack to user */
344 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
345
346 rec = (struct syscall_enter_record *) raw_data;
347 tracing_generic_entry_update(&rec->entry, 0, 0);
348 rec->entry.type = sys_data->enter_id;
349 syscall_get_arguments(current, regs, 0, sys_data->nb_args,
350 (unsigned long *)&rec->args);
351 perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size);
352 } while(0);
353 }
354
355 int reg_prof_syscall_enter(char *name)
356 {
357 int ret = 0;
358 int num;
359
360 num = syscall_name_to_nr(name);
361 if (num < 0 || num >= FTRACE_SYSCALL_MAX)
362 return -ENOSYS;
363
364 mutex_lock(&syscall_trace_lock);
365 if (!sys_prof_refcount_enter)
366 ret = register_trace_syscall_enter(prof_syscall_enter);
367 if (ret) {
368 pr_info("event trace: Could not activate"
369 "syscall entry trace point");
370 } else {
371 set_bit(num, enabled_prof_enter_syscalls);
372 sys_prof_refcount_enter++;
373 }
374 mutex_unlock(&syscall_trace_lock);
375 return ret;
376 }
377
378 void unreg_prof_syscall_enter(char *name)
379 {
380 int num;
381
382 num = syscall_name_to_nr(name);
383 if (num < 0 || num >= FTRACE_SYSCALL_MAX)
384 return;
385
386 mutex_lock(&syscall_trace_lock);
387 sys_prof_refcount_enter--;
388 clear_bit(num, enabled_prof_enter_syscalls);
389 if (!sys_prof_refcount_enter)
390 unregister_trace_syscall_enter(prof_syscall_enter);
391 mutex_unlock(&syscall_trace_lock);
392 }
393
394 static void prof_syscall_exit(struct pt_regs *regs, long ret)
395 {
396 struct syscall_metadata *sys_data;
397 struct syscall_exit_record rec;
398 int syscall_nr;
399
400 syscall_nr = syscall_get_nr(current, regs);
401 if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
402 return;
403
404 sys_data = syscall_nr_to_meta(syscall_nr);
405 if (!sys_data)
406 return;
407
408 tracing_generic_entry_update(&rec.entry, 0, 0);
409 rec.entry.type = sys_data->exit_id;
410 rec.ret = syscall_get_return_value(current, regs);
411
412 perf_tpcounter_event(sys_data->exit_id, 0, 1, &rec, sizeof(rec));
413 }
414
415 int reg_prof_syscall_exit(char *name)
416 {
417 int ret = 0;
418 int num;
419
420 num = syscall_name_to_nr(name);
421 if (num < 0 || num >= FTRACE_SYSCALL_MAX)
422 return -ENOSYS;
423
424 mutex_lock(&syscall_trace_lock);
425 if (!sys_prof_refcount_exit)
426 ret = register_trace_syscall_exit(prof_syscall_exit);
427 if (ret) {
428 pr_info("event trace: Could not activate"
429 "syscall entry trace point");
430 } else {
431 set_bit(num, enabled_prof_exit_syscalls);
432 sys_prof_refcount_exit++;
433 }
434 mutex_unlock(&syscall_trace_lock);
435 return ret;
436 }
437
438 void unreg_prof_syscall_exit(char *name)
439 {
440 int num;
441
442 num = syscall_name_to_nr(name);
443 if (num < 0 || num >= FTRACE_SYSCALL_MAX)
444 return;
445
446 mutex_lock(&syscall_trace_lock);
447 sys_prof_refcount_exit--;
448 clear_bit(num, enabled_prof_exit_syscalls);
449 if (!sys_prof_refcount_exit)
450 unregister_trace_syscall_exit(prof_syscall_exit);
451 mutex_unlock(&syscall_trace_lock);
452 }
453
454 #endif
455
456