git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/commitdiff
tracing: Add support for SOFT_DISABLE to syscall events
author: Tom Zanussi <tom.zanussi@linux.intel.com>
Thu, 24 Oct 2013 13:34:19 +0000 (08:34 -0500)
committer: Steven Rostedt <rostedt@goodmis.org>
Tue, 5 Nov 2013 22:48:49 +0000 (17:48 -0500)
The original SOFT_DISABLE patches didn't add support for soft disable
of syscall events; this adds it.

Add an array of ftrace_event_file pointers indexed by syscall number
to the trace array and remove the existing enabled bitmaps, which as a
result are now redundant.  The ftrace_event_file structs in turn
contain the soft disable flags we need for per-syscall soft disable
accounting.

Adding ftrace_event_files also means we can remove the USE_CALL_FILTER
bit, thus enabling multibuffer filter support for syscall events.

Link: http://lkml.kernel.org/r/6e72b566e85d8df8042f133efbc6c30e21fb017e.1382620672.git.tom.zanussi@linux.intel.com
Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
include/linux/syscalls.h
kernel/trace/trace.h
kernel/trace/trace_syscalls.c

index 10bafa97049d30ee33db491af8156e16f5d7975f..2ef31bfd620bb9f0be406e5a7d43fd91e40e9192 100644 (file)
@@ -120,7 +120,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
                .class                  = &event_class_syscall_enter,   \
                .event.funcs            = &enter_syscall_print_funcs,   \
                .data                   = (void *)&__syscall_meta_##sname,\
-               .flags                  = TRACE_EVENT_FL_CAP_ANY | TRACE_EVENT_FL_USE_CALL_FILTER,\
+               .flags                  = TRACE_EVENT_FL_CAP_ANY,       \
        };                                                              \
        static struct ftrace_event_call __used                          \
          __attribute__((section("_ftrace_events")))                    \
@@ -134,7 +134,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
                .class                  = &event_class_syscall_exit,    \
                .event.funcs            = &exit_syscall_print_funcs,    \
                .data                   = (void *)&__syscall_meta_##sname,\
-               .flags                  = TRACE_EVENT_FL_CAP_ANY | TRACE_EVENT_FL_USE_CALL_FILTER,\
+               .flags                  = TRACE_EVENT_FL_CAP_ANY,       \
        };                                                              \
        static struct ftrace_event_call __used                          \
          __attribute__((section("_ftrace_events")))                    \
index 12d1a612a73eb63408923b4d12676de56d07a46a..9c27cdadd71faa55edec74c7469e77c1b29b5134 100644 (file)
@@ -192,8 +192,8 @@ struct trace_array {
 #ifdef CONFIG_FTRACE_SYSCALLS
        int                     sys_refcount_enter;
        int                     sys_refcount_exit;
-       DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
-       DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
+       struct ftrace_event_file *enter_syscall_files[NR_syscalls];
+       struct ftrace_event_file *exit_syscall_files[NR_syscalls];
 #endif
        int                     stop_count;
        int                     clock_id;
index 32644eece42922b74f4415342f37bfb85f12ff1a..e4b6d11bdf78f35a2a6d8e831cd3ec21d97d9c00 100644 (file)
@@ -302,6 +302,7 @@ static int __init syscall_exit_define_fields(struct ftrace_event_call *call)
 static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 {
        struct trace_array *tr = data;
+       struct ftrace_event_file *ftrace_file;
        struct syscall_trace_enter *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
@@ -314,7 +315,13 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
        syscall_nr = trace_get_syscall_nr(current, regs);
        if (syscall_nr < 0)
                return;
-       if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
+
+       /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
+       ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
+       if (!ftrace_file)
+               return;
+
+       if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
                return;
 
        sys_data = syscall_nr_to_meta(syscall_nr);
@@ -336,8 +343,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
        entry->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
 
-       if (!call_filter_check_discard(sys_data->enter_event, entry,
-                                      buffer, event))
+       if (!filter_check_discard(ftrace_file, entry, buffer, event))
                trace_current_buffer_unlock_commit(buffer, event,
                                                   irq_flags, pc);
 }
@@ -345,6 +351,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 {
        struct trace_array *tr = data;
+       struct ftrace_event_file *ftrace_file;
        struct syscall_trace_exit *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
@@ -356,7 +363,13 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
        syscall_nr = trace_get_syscall_nr(current, regs);
        if (syscall_nr < 0)
                return;
-       if (!test_bit(syscall_nr, tr->enabled_exit_syscalls))
+
+       /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
+       ftrace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
+       if (!ftrace_file)
+               return;
+
+       if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
                return;
 
        sys_data = syscall_nr_to_meta(syscall_nr);
@@ -377,8 +390,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
        entry->nr = syscall_nr;
        entry->ret = syscall_get_return_value(current, regs);
 
-       if (!call_filter_check_discard(sys_data->exit_event, entry,
-                                      buffer, event))
+       if (!filter_check_discard(ftrace_file, entry, buffer, event))
                trace_current_buffer_unlock_commit(buffer, event,
                                                   irq_flags, pc);
 }
@@ -397,7 +409,7 @@ static int reg_event_syscall_enter(struct ftrace_event_file *file,
        if (!tr->sys_refcount_enter)
                ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
        if (!ret) {
-               set_bit(num, tr->enabled_enter_syscalls);
+               rcu_assign_pointer(tr->enter_syscall_files[num], file);
                tr->sys_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
@@ -415,10 +427,15 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
                return;
        mutex_lock(&syscall_trace_lock);
        tr->sys_refcount_enter--;
-       clear_bit(num, tr->enabled_enter_syscalls);
+       rcu_assign_pointer(tr->enter_syscall_files[num], NULL);
        if (!tr->sys_refcount_enter)
                unregister_trace_sys_enter(ftrace_syscall_enter, tr);
        mutex_unlock(&syscall_trace_lock);
+       /*
+        * Callers expect the event to be completely disabled on
+        * return, so wait for current handlers to finish.
+        */
+       synchronize_sched();
 }
 
 static int reg_event_syscall_exit(struct ftrace_event_file *file,
@@ -435,7 +452,7 @@ static int reg_event_syscall_exit(struct ftrace_event_file *file,
        if (!tr->sys_refcount_exit)
                ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
        if (!ret) {
-               set_bit(num, tr->enabled_exit_syscalls);
+               rcu_assign_pointer(tr->exit_syscall_files[num], file);
                tr->sys_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
@@ -453,10 +470,15 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
                return;
        mutex_lock(&syscall_trace_lock);
        tr->sys_refcount_exit--;
-       clear_bit(num, tr->enabled_exit_syscalls);
+       rcu_assign_pointer(tr->exit_syscall_files[num], NULL);
        if (!tr->sys_refcount_exit)
                unregister_trace_sys_exit(ftrace_syscall_exit, tr);
        mutex_unlock(&syscall_trace_lock);
+       /*
+        * Callers expect the event to be completely disabled on
+        * return, so wait for current handlers to finish.
+        */
+       synchronize_sched();
 }
 
 static int __init init_syscall_trace(struct ftrace_event_call *call)