tracing: Add static to local functions

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1fd4b9479210183762293944be777abb5435f8e3..846888ea2ba4cd9ffd37c5b4d8836c46c59d7dec 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -62,7 +62,7 @@
 #define FTRACE_HASH_DEFAULT_BITS 10
 #define FTRACE_HASH_MAX_BITS 12
 
-#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
+#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 #define INIT_REGEX_LOCK(opsname)       \
@@ -103,7 +103,6 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_MUTEX(ftrace_lock);
 
-static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
@@ -171,23 +170,6 @@ int ftrace_nr_registered_ops(void)
        return cnt;
 }
 
-static void
-ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
-                       struct ftrace_ops *op, struct pt_regs *regs)
-{
-       int bit;
-
-       bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
-       if (bit < 0)
-               return;
-
-       do_for_each_ftrace_op(op, ftrace_global_list) {
-               op->func(ip, parent_ip, op, regs);
-       } while_for_each_ftrace_op(op);
-
-       trace_clear_recursion(bit);
-}
-
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct pt_regs *regs)
 {
@@ -237,43 +219,6 @@ static int control_ops_alloc(struct ftrace_ops *ops)
        return 0;
 }
 
-static void update_global_ops(void)
-{
-       ftrace_func_t func = ftrace_global_list_func;
-       void *private = NULL;
-
-       /* The list has its own recursion protection. */
-       global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
-
-       /*
-        * If there's only one function registered, then call that
-        * function directly. Otherwise, we need to iterate over the
-        * registered callers.
-        */
-       if (ftrace_global_list == &ftrace_list_end ||
-           ftrace_global_list->next == &ftrace_list_end) {
-               func = ftrace_global_list->func;
-               private = ftrace_global_list->private;
-               /*
-                * As we are calling the function directly.
-                * If it does not have recursion protection,
-                * the function_trace_op needs to be updated
-                * accordingly.
-                */
-               if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE))
-                       global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
-       }
-
-       /* If we filter on pids, update to use the pid function */
-       if (!list_empty(&ftrace_pids)) {
-               set_ftrace_pid_function(func);
-               func = ftrace_pid_func;
-       }
-
-       global_ops.func = func;
-       global_ops.private = private;
-}
-
 static void ftrace_sync(struct work_struct *work)
 {
        /*
@@ -301,8 +246,6 @@ static void update_ftrace_function(void)
 {
        ftrace_func_t func;
 
-       update_global_ops();
-
        /*
         * If we are at the end of the list and this ops is
         * recursion safe and not dynamic and the arch supports passing ops,
@@ -314,10 +257,7 @@ static void update_ftrace_function(void)
             (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
             !FTRACE_FORCE_LIST_FUNC)) {
                /* Set the ftrace_ops that the arch callback uses */
-               if (ftrace_ops_list == &global_ops)
-                       set_function_trace_op = ftrace_global_list;
-               else
-                       set_function_trace_op = ftrace_ops_list;
+               set_function_trace_op = ftrace_ops_list;
                func = ftrace_ops_list->func;
        } else {
                /* Just use the default ftrace_ops */
@@ -434,16 +374,9 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        if (ops->flags & FTRACE_OPS_FL_DELETED)
                return -EINVAL;
 
-       if (FTRACE_WARN_ON(ops == &global_ops))
-               return -EINVAL;
-
        if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
                return -EBUSY;
 
-       /* We don't support both control and global flags set. */
-       if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
-               return -EINVAL;
-
 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /*
         * If the ftrace_ops specifies SAVE_REGS, then it only can be used
@@ -461,10 +394,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        if (!core_kernel_data((unsigned long)ops))
                ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 
-       if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-               add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
-               ops->flags |= FTRACE_OPS_FL_ENABLED;
-       } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+       if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                if (control_ops_alloc(ops))
                        return -ENOMEM;
                add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
@@ -484,15 +414,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
        if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
                return -EBUSY;
 
-       if (FTRACE_WARN_ON(ops == &global_ops))
-               return -EINVAL;
-
-       if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-               ret = remove_ftrace_list_ops(&ftrace_global_list,
-                                            &global_ops, ops);
-               if (!ret)
-                       ops->flags &= ~FTRACE_OPS_FL_ENABLED;
-       } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+       if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                ret = remove_ftrace_list_ops(&ftrace_control_list,
                                             &control_ops, ops);
        } else
@@ -2128,15 +2050,6 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
        ftrace_start_up++;
        command |= FTRACE_UPDATE_CALLS;
 
-       /* ops marked global share the filter hashes */
-       if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-               ops = &global_ops;
-               /* Don't update hash if global is already set */
-               if (global_start_up)
-                       hash_enable = false;
-               global_start_up++;
-       }
-
        ops->flags |= FTRACE_OPS_FL_ENABLED;
        if (hash_enable)
                ftrace_hash_rec_enable(ops, 1);
@@ -2166,21 +2079,10 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
         */
        WARN_ON_ONCE(ftrace_start_up < 0);
 
-       if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-               ops = &global_ops;
-               global_start_up--;
-               WARN_ON_ONCE(global_start_up < 0);
-               /* Don't update hash if global still has users */
-               if (global_start_up) {
-                       WARN_ON_ONCE(!ftrace_start_up);
-                       hash_disable = false;
-               }
-       }
-
        if (hash_disable)
                ftrace_hash_rec_disable(ops, 1);
 
-       if (ops != &global_ops || !global_start_up)
+       if (!global_start_up)
                ops->flags &= ~FTRACE_OPS_FL_ENABLED;
 
        command |= FTRACE_UPDATE_CALLS;
@@ -3524,10 +3426,6 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
        struct ftrace_hash *hash;
        int ret;
 
-       /* All global ops uses the global ops filters */
-       if (ops->flags & FTRACE_OPS_FL_GLOBAL)
-               ops = &global_ops;
-
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
@@ -4462,6 +4360,34 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+__init void ftrace_init_global_array_ops(struct trace_array *tr)
+{
+       tr->ops = &global_ops;
+       tr->ops->private = tr;
+}
+
+void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
+{
+       /* If we filter on pids, update to use the pid function */
+       if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
+               if (WARN_ON(tr->ops->func != ftrace_stub))
+                       printk("ftrace ops had %pS for function\n",
+                              tr->ops->func);
+               /* Only the top level instance does pid tracing */
+               if (!list_empty(&ftrace_pids)) {
+                       set_ftrace_pid_function(func);
+                       func = ftrace_pid_func;
+               }
+       }
+       tr->ops->func = func;
+       tr->ops->private = tr;
+}
+
+void ftrace_reset_array_ops(struct trace_array *tr)
+{
+       tr->ops->func = ftrace_stub;
+}
+
 static void
 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
                        struct ftrace_ops *op, struct pt_regs *regs)
@@ -4520,9 +4446,16 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
         */
        preempt_disable_notrace();
        do_for_each_ftrace_op(op, ftrace_ops_list) {
-               if (ftrace_ops_test(op, ip, regs))
+               if (ftrace_ops_test(op, ip, regs)) {
+                       if (WARN_ON(!op->func)) {
+                               function_trace_stop = 1;
+                               printk("op=%p %pS\n", op, op);
+                               goto out;
+                       }
                        op->func(ip, parent_ip, op, regs);
+               }
        } while_for_each_ftrace_op(op);
+out:
        preempt_enable_notrace();
        trace_clear_recursion(bit);
 }
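
[Annotation, not part of the diff: for orientation, with this hunk applied the list walker reads roughly as below. Lines outside the hunk are reconstructed from its context lines and from kernels of this era, so treat them as an approximation. The recursion guard brackets the walk, and the new WARN_ON() path sets function_trace_stop to halt tracing rather than jump through a NULL callback.]

	static inline void
	__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *ignored, struct pt_regs *regs)
	{
		struct ftrace_ops *op;
		int bit;

		bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
		if (bit < 0)
			return;

		/*
		 * Ops may be dynamically allocated; they must not be freed
		 * until a synchronize_sched() has passed, hence the
		 * preempt-disabled walk.
		 */
		preempt_disable_notrace();
		do_for_each_ftrace_op(op, ftrace_ops_list) {
			if (ftrace_ops_test(op, ip, regs)) {
				if (WARN_ON(!op->func)) {
					/* NULL func means a corrupted ops; stop tracing */
					function_trace_stop = 1;
					printk("op=%p %pS\n", op, op);
					goto out;
				}
				op->func(ip, parent_ip, op, regs);
			}
		} while_for_each_ftrace_op(op);
	out:
		preempt_enable_notrace();
		trace_clear_recursion(bit);
	}
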
@@ -4927,7 +4860,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 static int ftrace_graph_active;
-static struct notifier_block ftrace_suspend_notifier;
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 {
@@ -5076,8 +5008,7 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
 /* Just a place holder for function graph */
 static struct ftrace_ops fgraph_ops __read_mostly = {
        .func           = ftrace_stub,
-       .flags          = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
-                               FTRACE_OPS_FL_RECURSION_SAFE,
+       .flags          = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
@@ -5104,6 +5035,10 @@ static void update_function_graph_func(void)
                ftrace_graph_entry = ftrace_graph_entry_test;
 }
 
+static struct notifier_block ftrace_suspend_notifier = {
+       .notifier_call = ftrace_suspend_notifier_call,
+};
+
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                        trace_func_graph_ent_t entryfunc)
 {
@@ -5117,7 +5052,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                goto out;
        }
 
-       ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
        register_pm_notifier(&ftrace_suspend_notifier);
 
        ftrace_graph_active++;
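
[Annotation, not part of the diff: the three ftrace_suspend_notifier hunks above replace a runtime assignment of .notifier_call, previously re-executed on every register_ftrace_graph() call, with a one-time designated initializer, which also lets the notifier_block definition sit next to the callback it points at. The same pattern in miniature, with a hypothetical PM notifier; the example_* names are invented for illustration.]

	#include <linux/notifier.h>
	#include <linux/suspend.h>

	static int example_suspend_call(struct notifier_block *bl,
					unsigned long state, void *unused)
	{
		/* React to PM_HIBERNATION_PREPARE and friends here */
		return NOTIFY_DONE;
	}

	/*
	 * Fully initialized at build time; nothing left to assign at
	 * registration, so register_pm_notifier() can be called directly.
	 */
	static struct notifier_block example_suspend_notifier = {
		.notifier_call = example_suspend_call,
	};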