#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"		/* for struct trace_array and trace flags */

static DEFINE_MUTEX(syscall_trace_lock);
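
/*
 * syscall_trace_lock serializes the (un)registration paths below: the
 * per-trace-array sys_refcount_{enter,exit} counters and, under
 * CONFIG_PERF_EVENTS, the perf refcounts and enabled-syscall bitmaps.
 */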

static int syscall_enter_register(struct trace_event_call *event,
				  enum trace_reg type, void *data);
static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data);

static struct list_head *
syscall_get_enter_fields(struct trace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscalls symbols aliases prefixed
	 * with ".SyS" or ".sys" instead of "sys", leading to an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif
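
/*
 * Illustrative (hypothetical) symbols: kallsyms may report "SyS_read"
 * where the metadata name is "sys_read"; skipping the three-byte prefix
 * compares "_read" against "_read", so the wrapper alias still matches.
 */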

#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow for 32bit applications
 * to run on a 64bit kernel, do not map the syscalls for
 * the 32bit tasks the same as they do for 64bit tasks.
 *
 * In such a case, instead of reporting the wrong syscalls,
 * simply ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to
 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 * define the function arch_trace_is_compat_syscall() to let
 * the tracing system know that it should ignore it.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */
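
/*
 * A minimal sketch of what an arch might provide (an assumption for
 * illustration, not part of this file):
 *
 *	static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
 *	{
 *		return in_compat_syscall();	// hypothetical wiring
 *	}
 */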

static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}

	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	trace_seq_printf(s, "%s(", entry->name);

	for (i = 0; i < entry->nb_args; i++) {

		if (trace_seq_has_overflowed(s))
			goto end;

		/* parameter types */
		if (tr->trace_flags & TRACE_ITER_VERBOSE)
			trace_seq_printf(s, "%s ", entry->types[i]);

		/* parameter values */
		trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				 trace->args[i],
				 i == entry->nb_args - 1 ? "" : ", ");
	}

	trace_seq_putc(s, ')');
end:
	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}
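
/*
 * Example rendered output (illustrative values only):
 *	sys_read(fd: 3, buf: 7ffd8bc0e000, count: 400)
 * With the verbose trace flag set, each value is also preceded by its
 * C type, e.g. "unsigned int fd: 3".
 */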

static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_putc(s, '\n');
		goto out;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			 trace->ret);

 out:
	return trace_handle_return(s);
}

extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, field, name)				\
	sizeof(type) != sizeof(trace.field) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), field),		\
		sizeof(trace.field), is_signed_type(type)
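
/*
 * Example expansion (illustrative): SYSCALL_FIELD(int, nr, __syscall_nr)
 * yields the argument list
 *	"int", "__syscall_nr", offsetof(typeof(trace), nr),
 *	sizeof(trace.nr), is_signed_type(int)
 * which matches trace_define_field()'s (type, name, offset, size,
 * is_signed) parameters; a size mismatch instead degrades into a
 * link-time error via the undefined __bad_type_size().
 */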

static int __init
__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}
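
/*
 * For a hypothetical two-argument syscall (fd, buf) on a 64-bit arch the
 * generated print_fmt would read:
 *	"fd: 0x%08lx, buf: 0x%08lx", ((unsigned long)(REC->fd)),
 *	((unsigned long)(REC->buf))
 */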

static int __init set_syscall_print_fmt(struct trace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void __init free_syscall_print_fmt(struct trace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

static int __init syscall_enter_define_fields(struct trace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),
				 FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int __init syscall_exit_define_fields(struct trace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),
				 FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret, ret),
				 FILTER_OTHER);

	return ret;
}

static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	struct trace_array *tr = data;
	struct trace_event_file *trace_file;
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
	trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->enter_event->event.type, size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    irq_flags, pc);
}
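
/*
 * The record committed above is just the syscall nr followed by nb_args
 * raw unsigned longs; the field names and types shown in tracefs are
 * layered on top by syscall_enter_define_fields() above.
 */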

static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
	struct trace_array *tr = data;
	struct trace_event_file *trace_file;
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
	trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->exit_event->event.type, sizeof(*entry),
			irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    irq_flags, pc);
}

static int reg_event_syscall_enter(struct trace_event_file *file,
				   struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
	if (!ret) {
		rcu_assign_pointer(tr->enter_syscall_files[num], file);
		tr->sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}
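
/*
 * Note the refcount pattern shared by the four helpers here: the first
 * enabled event in a trace_array registers the global tracepoint, the
 * last disabled one unregisters it, and per-event routing happens via
 * the RCU-managed {enter,exit}_syscall_files[] arrays.
 */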

static void unreg_event_syscall_enter(struct trace_event_file *file,
				      struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_enter--;
	RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
	if (!tr->sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
	mutex_unlock(&syscall_trace_lock);
}

static int reg_event_syscall_exit(struct trace_event_file *file,
				  struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
	if (!ret) {
		rcu_assign_pointer(tr->exit_syscall_files[num], file);
		tr->sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_exit(struct trace_event_file *file,
				     struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_exit--;
	RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
	if (!tr->sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
	mutex_unlock(&syscall_trace_lock);
}

static int __init init_syscall_trace(struct trace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);

	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct trace_event_class __refdata event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct trace_event_class __refdata event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

void __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}
}

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	if (hlist_empty(head))
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
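
	/*
	 * Worked example (assuming a 64-bit arch and sizeof(*rec) == 16):
	 * a six-argument syscall needs 6 * 8 + 16 = 64 bytes; perf prefixes
	 * each record with a u32 size field, so ALIGN(64 + 4, 8) = 72, and
	 * subtracting the u32 again leaves 68 bytes for the record itself.
	 */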

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, NULL, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

static int perf_sysenter_enable(struct trace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall entry trace point");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysenter_disable(struct trace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	if (hlist_empty(head))
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, NULL, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

static int perf_sysexit_enable(struct trace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall exit trace point");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysexit_disable(struct trace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

static int syscall_enter_register(struct trace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
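
/*
 * Usage note: once registered, these events show up in tracefs under
 * events/syscalls/ (e.g. sys_enter_openat, sys_exit_openat) and can be
 * toggled through the usual "enable" files; perf attaches through the
 * TRACE_REG_PERF_* callbacks handled above.
 */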