#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);

static int syscall_enter_register(struct trace_event_call *event,
				  enum trace_reg type, void *data);
static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data);

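/*
 * Syscall enter events share a single trace_event_class, so the field
 * list lives in the per-syscall metadata rather than in the class.
 */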
static struct list_head *
syscall_get_enter_fields(struct trace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscall symbol aliases prefixed
	 * with ".SyS" or ".sys" instead of "sys", leading to an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif

#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow 32bit applications
 * to run on a 64bit kernel do not map the syscalls for
 * the 32bit tasks the same as they do for 64bit tasks.
 *
 * *cough*x86*cough*
 *
 * In such a case, instead of reporting the wrong syscalls,
 * simply ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to
 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 * define the function arch_trace_is_compat_syscall() to let
 * the tracing system know that it should ignore the syscall.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */

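/*
 * Resolve a syscall handler address to its syscall_metadata entry by
 * looking up the symbol name via kallsyms and matching it against the
 * names recorded in the __syscalls_metadata section.
 */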
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}

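/* Look up the metadata for a syscall number, or NULL if none is mapped. */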
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

const char *get_syscall_name(int syscall)
{
	struct syscall_metadata *entry;

	entry = syscall_nr_to_meta(syscall);
	if (!entry)
		return NULL;

	return entry->name;
}

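/*
 * Output handler for sys_enter events: print the syscall name followed
 * by its arguments, prefixing each argument with its type when the
 * "verbose" trace option is set.
 */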
static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	trace_seq_printf(s, "%s(", entry->name);

	for (i = 0; i < entry->nb_args; i++) {

		if (trace_seq_has_overflowed(s))
			goto end;

		/* parameter types */
		if (tr->trace_flags & TRACE_ITER_VERBOSE)
			trace_seq_printf(s, "%s ", entry->types[i]);

		/* parameter values */
		trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				 trace->args[i],
				 i == entry->nb_args - 1 ? "" : ", ");
	}

	trace_seq_putc(s, ')');
end:
	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}

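/*
 * Output handler for sys_exit events: print the syscall name and its
 * return value.
 */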
static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_putc(s, '\n');
		goto out;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			 trace->ret);

out:
	return trace_handle_return(s);
}

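/*
 * __bad_type_size() is declared but never defined; referencing it forces
 * a build error if SYSCALL_FIELD() is used with a type whose size does
 * not match the corresponding struct member.
 */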
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, field, name)				\
	sizeof(type) != sizeof(trace.field) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), field),		\
		sizeof(trace.field), is_signed_type(type)

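/*
 * Build the print_fmt string for a syscall enter event: a quoted format
 * with one "<arg>: 0x%0<N>lx" specifier per argument (N being the width
 * of an unsigned long), followed by the matching "REC->" accessors.
 * Called once with len == 0 to size the buffer, then again to fill it.
 */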
static int __init
__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int __init set_syscall_print_fmt(struct trace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void __init free_syscall_print_fmt(struct trace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

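/*
 * Register the ftrace fields of a syscall enter event: the syscall
 * number followed by one unsigned long per argument.
 */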
static int __init syscall_enter_define_fields(struct trace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),
				 FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int __init syscall_exit_define_fields(struct trace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),
				 FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret, ret),
				 FILTER_OTHER);

	return ret;
}

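/*
 * Tracepoint probe attached to sys_enter: record one syscall_trace_enter
 * entry (syscall number plus arguments) in the ring buffer of the trace
 * array that registered it.
 */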
static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	struct trace_array *tr = data;
	struct trace_event_file *trace_file;
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
	trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->enter_event->event.type, size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    irq_flags, pc);
}

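/*
 * Tracepoint probe attached to sys_exit: record the syscall number and
 * return value in the ring buffer.
 */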
static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
	struct trace_array *tr = data;
	struct trace_event_file *trace_file;
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
	trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->exit_event->event.type, sizeof(*entry),
			irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    irq_flags, pc);
}

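/*
 * The sys_enter/sys_exit tracepoint probes are registered once per trace
 * array and shared by all syscall events; a per-array refcount tracks how
 * many events are enabled, and the per-syscall file pointer gates which
 * syscalls actually get recorded.
 */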
static int reg_event_syscall_enter(struct trace_event_file *file,
				   struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
	if (!ret) {
		rcu_assign_pointer(tr->enter_syscall_files[num], file);
		tr->sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_enter(struct trace_event_file *file,
				      struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_enter--;
	RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
	if (!tr->sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
	mutex_unlock(&syscall_trace_lock);
}

static int reg_event_syscall_exit(struct trace_event_file *file,
				  struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
	if (!ret) {
		rcu_assign_pointer(tr->exit_syscall_files[num], file);
		tr->sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_exit(struct trace_event_file *file,
				     struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_exit--;
	RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
	if (!tr->sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
	mutex_unlock(&syscall_trace_lock);
}

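/*
 * raw_init callback for both syscall event classes: build the print_fmt
 * string and register the trace event, skipping syscalls whose metadata
 * could not be mapped to a syscall number.
 */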
static int __init init_syscall_trace(struct trace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);

	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct trace_event_class __refdata event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct trace_event_class __refdata event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

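/*
 * Default mapping from syscall number to handler address; an architecture
 * can override this weak function if its syscall table is not a simple
 * array of function pointers.
 */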
unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

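/*
 * Build the syscall number -> metadata table at boot by resolving each
 * entry of the syscall table to its metadata record.
 */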
void __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}
}

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

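/*
 * Tracepoint probe used by perf: copy the syscall number and arguments
 * into a perf trace buffer and submit it to the perf events attached to
 * this syscall's enter event.
 */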
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	if (hlist_empty(head))
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);
	perf_trace_buf_submit(rec, size, rctx,
			      sys_data->enter_event->event.type, 1, regs,
			      head, NULL);
}

static int perf_sysenter_enable(struct trace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall entry trace point\n");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysenter_disable(struct trace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	if (hlist_empty(head))
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);
	perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
			      1, regs, head, NULL);
}

static int perf_sysexit_enable(struct trace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall exit trace point\n");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysexit_disable(struct trace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

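/*
 * trace_event_class ->reg() callbacks: dispatch ftrace and perf
 * registration requests for syscall enter/exit events to the handlers
 * above.
 */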
static int syscall_enter_register(struct trace_event_call *event,
				  enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}