diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index eb230f06ba4123a1dc75dafb508789f6cd7afd25..b9691ee8f6c182cfee1af7308555b9291f3730bd 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -15,6 +15,7 @@
 
 #include <linux/stop_machine.h>
 #include <linux/clocksource.h>
+#include <linux/sched/task.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
 #include <linux/suspend.h>
@@ -1110,13 +1111,6 @@ struct ftrace_func_entry {
        unsigned long ip;
 };
 
-struct ftrace_hash {
-       unsigned long           size_bits;
-       struct hlist_head       *buckets;
-       unsigned long           count;
-       struct rcu_head         rcu;
-};
-
 /*
  * We make these constant because no one should touch them,
  * but they are used as the default "empty hash", to avoid allocating
@@ -1192,26 +1186,24 @@ struct ftrace_page {
 static struct ftrace_page      *ftrace_pages_start;
 static struct ftrace_page      *ftrace_pages;
 
-static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
+static __always_inline unsigned long
+ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
 {
-       return !hash || !hash->count;
+       if (hash->size_bits > 0)
+               return hash_long(ip, hash->size_bits);
+
+       return 0;
 }
 
-static struct ftrace_func_entry *
-ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
+/* Only use this function if ftrace_hash_empty() has already been tested */
+static __always_inline struct ftrace_func_entry *
+__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 {
        unsigned long key;
        struct ftrace_func_entry *entry;
        struct hlist_head *hhd;
 
-       if (ftrace_hash_empty(hash))
-               return NULL;
-
-       if (hash->size_bits > 0)
-               key = hash_long(ip, hash->size_bits);
-       else
-               key = 0;
-
+       key = ftrace_hash_key(hash, ip);
        hhd = &hash->buckets[key];
 
        hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
@@ -1221,17 +1213,32 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
        return NULL;
 }
 
+/**
+ * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
+ * @hash: The hash to look at
+ * @ip: The instruction pointer to test
+ *
+ * Search a given @hash to see if a given instruction pointer (@ip)
+ * exists in it.
+ *
+ * Returns the entry that holds the @ip if found. NULL otherwise.
+ */
+struct ftrace_func_entry *
+ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
+{
+       if (ftrace_hash_empty(hash))
+               return NULL;
+
+       return __ftrace_lookup_ip(hash, ip);
+}
+
 static void __add_hash_entry(struct ftrace_hash *hash,
                             struct ftrace_func_entry *entry)
 {
        struct hlist_head *hhd;
        unsigned long key;
 
-       if (hash->size_bits)
-               key = hash_long(entry->ip, hash->size_bits);
-       else
-               key = 0;
-
+       key = ftrace_hash_key(hash, entry->ip);
        hhd = &hash->buckets[key];
        hlist_add_head(&entry->hlist, hhd);
        hash->count++;
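
[Aside, not part of the patch: the point of the split above is that ftrace_lookup_ip() stays safe for one-off checks, while hot paths that have already ruled out an empty hash can use the unguarded __ftrace_lookup_ip(). A minimal sketch of the calling pattern; example_ip_enabled() and example_count() are hypothetical helpers.]

static bool example_ip_enabled(struct ftrace_hash *filter, unsigned long ip)
{
        /* one-off check: the wrapper handles NULL and empty hashes */
        return ftrace_lookup_ip(filter, ip) != NULL;
}

static int example_count(struct ftrace_hash *filter,
                         unsigned long *ips, int nr_ips)
{
        int i, cnt = 0;

        /* hot loop: test emptiness once, then use the unguarded lookup */
        if (ftrace_hash_empty(filter))
                return 0;

        for (i = 0; i < nr_ips; i++)
                if (__ftrace_lookup_ip(filter, ips[i]))
                        cnt++;
        return cnt;
}
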
@@ -1383,9 +1390,8 @@ ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
                                       struct ftrace_hash *new_hash);
 
-static int
-ftrace_hash_move(struct ftrace_ops *ops, int enable,
-                struct ftrace_hash **dst, struct ftrace_hash *src)
+static struct ftrace_hash *
+__ftrace_hash_move(struct ftrace_hash *src)
 {
        struct ftrace_func_entry *entry;
        struct hlist_node *tn;
@@ -1393,21 +1399,13 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
        struct ftrace_hash *new_hash;
        int size = src->count;
        int bits = 0;
-       int ret;
        int i;
 
-       /* Reject setting notrace hash on IPMODIFY ftrace_ops */
-       if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
-               return -EINVAL;
-
        /*
-        * If the new source is empty, just free dst and assign it
-        * the empty_hash.
+        * If the new source is empty, just return the empty_hash.
         */
-       if (!src->count) {
-               new_hash = EMPTY_HASH;
-               goto update;
-       }
+       if (!src->count)
+               return EMPTY_HASH;
 
        /*
         * Make the hash size about 1/2 the # found
@@ -1421,7 +1419,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 
        new_hash = alloc_ftrace_hash(bits);
        if (!new_hash)
-               return -ENOMEM;
+               return NULL;
 
        size = 1 << src->size_bits;
        for (i = 0; i < size; i++) {
@@ -1432,7 +1430,24 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
                }
        }
 
-update:
+       return new_hash;
+}
+
+static int
+ftrace_hash_move(struct ftrace_ops *ops, int enable,
+                struct ftrace_hash **dst, struct ftrace_hash *src)
+{
+       struct ftrace_hash *new_hash;
+       int ret;
+
+       /* Reject setting notrace hash on IPMODIFY ftrace_ops */
+       if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
+               return -EINVAL;
+
+       new_hash = __ftrace_hash_move(src);
+       if (!new_hash)
+               return -ENOMEM;
+
        /* Make sure this can be applied if it is IPMODIFY ftrace_ops */
        if (enable) {
                /* IPMODIFY should be updated only when filter_hash updating */
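
[Aside, not part of the patch: the return convention of __ftrace_hash_move() is NULL on allocation failure and the shared EMPTY_HASH for an empty source. A condensed sketch of how a caller swaps the result in; example_publish_hash() is hypothetical and omits the IPMODIFY and trampoline bookkeeping that the real ftrace_hash_move() performs.]

static int example_publish_hash(struct ftrace_hash **dst,
                                struct ftrace_hash *src)
{
        struct ftrace_hash *old_hash = *dst;
        struct ftrace_hash *new_hash = __ftrace_hash_move(src);

        if (!new_hash)
                return -ENOMEM;

        rcu_assign_pointer(*dst, new_hash);
        synchronize_sched();            /* wait out existing readers */
        free_ftrace_hash(old_hash);     /* tolerates EMPTY_HASH */
        return 0;
}
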
@@ -1466,9 +1481,9 @@ static bool hash_contains_ip(unsigned long ip,
         * notrace hash is considered not in the notrace hash.
         */
        return (ftrace_hash_empty(hash->filter_hash) ||
-               ftrace_lookup_ip(hash->filter_hash, ip)) &&
+               __ftrace_lookup_ip(hash->filter_hash, ip)) &&
                (ftrace_hash_empty(hash->notrace_hash) ||
-                !ftrace_lookup_ip(hash->notrace_hash, ip));
+                !__ftrace_lookup_ip(hash->notrace_hash, ip));
 }
 
 /*
@@ -2880,7 +2895,7 @@ ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
 
        /* The function must be in the filter */
        if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
-           !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
+           !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
                return 0;
 
        /* If in notrace hash, we ignore it too */
@@ -4382,7 +4397,7 @@ __setup("ftrace_filter=", set_ftrace_filter);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
-static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
+static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
 
 static unsigned long save_global_trampoline;
 static unsigned long save_global_flags;
@@ -4401,26 +4416,38 @@ static int __init set_graph_notrace_function(char *str)
 }
 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
 
+static int __init set_graph_max_depth_function(char *str)
+{
+       if (!str)
+               return 0;
+       fgraph_max_depth = simple_strtoul(str, NULL, 0);
+       return 1;
+}
+__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
+
 static void __init set_ftrace_early_graph(char *buf, int enable)
 {
        int ret;
        char *func;
-       unsigned long *table = ftrace_graph_funcs;
-       int *count = &ftrace_graph_count;
+       struct ftrace_hash *hash;
 
-       if (!enable) {
-               table = ftrace_graph_notrace_funcs;
-               count = &ftrace_graph_notrace_count;
-       }
+       hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+       if (WARN_ON(!hash))
+               return;
 
        while (buf) {
                func = strsep(&buf, ",");
                /* we allow only one expression at a time */
-               ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
+               ret = ftrace_graph_set_hash(hash, func);
                if (ret)
                        printk(KERN_DEBUG "ftrace: function %s not "
                                          "traceable\n", func);
        }
+
+       if (enable)
+               ftrace_graph_hash = hash;
+       else
+               ftrace_graph_notrace_hash = hash;
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
@@ -4540,26 +4567,55 @@ static const struct file_operations ftrace_notrace_fops = {
 
 static DEFINE_MUTEX(graph_lock);
 
-int ftrace_graph_count;
-int ftrace_graph_notrace_count;
-unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
-unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH;
+struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH;
+
+enum graph_filter_type {
+       GRAPH_FILTER_NOTRACE    = 0,
+       GRAPH_FILTER_FUNCTION,
+};
+
+#define FTRACE_GRAPH_EMPTY     ((void *)1)
 
 struct ftrace_graph_data {
-       unsigned long *table;
-       size_t size;
-       int *count;
-       const struct seq_operations *seq_ops;
+       struct ftrace_hash              *hash;
+       struct ftrace_func_entry        *entry;
+       int                             idx;   /* for hash table iteration */
+       enum graph_filter_type          type;
+       struct ftrace_hash              *new_hash;
+       const struct seq_operations     *seq_ops;
+       struct trace_parser             parser;
 };
 
 static void *
 __g_next(struct seq_file *m, loff_t *pos)
 {
        struct ftrace_graph_data *fgd = m->private;
+       struct ftrace_func_entry *entry = fgd->entry;
+       struct hlist_head *head;
+       int i, idx = fgd->idx;
 
-       if (*pos >= *fgd->count)
+       if (*pos >= fgd->hash->count)
                return NULL;
-       return &fgd->table[*pos];
+
+       if (entry) {
+               hlist_for_each_entry_continue(entry, hlist) {
+                       fgd->entry = entry;
+                       return entry;
+               }
+
+               idx++;
+       }
+
+       for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
+               head = &fgd->hash->buckets[i];
+               hlist_for_each_entry(entry, head, hlist) {
+                       fgd->entry = entry;
+                       fgd->idx = i;
+                       return entry;
+               }
+       }
+       return NULL;
 }
 
 static void *
@@ -4575,10 +4631,19 @@ static void *g_start(struct seq_file *m, loff_t *pos)
 
        mutex_lock(&graph_lock);
 
+       if (fgd->type == GRAPH_FILTER_FUNCTION)
+               fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
+                                       lockdep_is_held(&graph_lock));
+       else
+               fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
+                                       lockdep_is_held(&graph_lock));
+
        /* Nothing, tell g_show to print all functions are enabled */
-       if (!*fgd->count && !*pos)
-               return (void *)1;
+       if (ftrace_hash_empty(fgd->hash) && !*pos)
+               return FTRACE_GRAPH_EMPTY;
 
+       fgd->idx = 0;
+       fgd->entry = NULL;
        return __g_next(m, pos);
 }
 
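
[Aside, not part of the patch: __g_next() above is a resumable walk, keeping its cursor in fgd->entry and fgd->idx so seq_file can stop and restart. A plain one-shot walk over an ftrace_hash, for comparison; example_walk_hash() is hypothetical.]

static void example_walk_hash(struct ftrace_hash *hash)
{
        struct ftrace_func_entry *entry;
        int i;

        for (i = 0; i < 1 << hash->size_bits; i++) {
                hlist_for_each_entry(entry, &hash->buckets[i], hlist)
                        pr_info("%ps\n", (void *)entry->ip);
        }
}
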
@@ -4589,22 +4654,22 @@ static void g_stop(struct seq_file *m, void *p)
 
 static int g_show(struct seq_file *m, void *v)
 {
-       unsigned long *ptr = v;
+       struct ftrace_func_entry *entry = v;
 
-       if (!ptr)
+       if (!entry)
                return 0;
 
-       if (ptr == (unsigned long *)1) {
+       if (entry == FTRACE_GRAPH_EMPTY) {
                struct ftrace_graph_data *fgd = m->private;
 
-               if (fgd->table == ftrace_graph_funcs)
+               if (fgd->type == GRAPH_FILTER_FUNCTION)
                        seq_puts(m, "#### all functions enabled ####\n");
                else
                        seq_puts(m, "#### no functions disabled ####\n");
                return 0;
        }
 
-       seq_printf(m, "%ps\n", (void *)*ptr);
+       seq_printf(m, "%ps\n", (void *)entry->ip);
 
        return 0;
 }
@@ -4621,24 +4686,51 @@ __ftrace_graph_open(struct inode *inode, struct file *file,
                    struct ftrace_graph_data *fgd)
 {
        int ret = 0;
+       struct ftrace_hash *new_hash = NULL;
 
-       mutex_lock(&graph_lock);
-       if ((file->f_mode & FMODE_WRITE) &&
-           (file->f_flags & O_TRUNC)) {
-               *fgd->count = 0;
-               memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
+       if (file->f_mode & FMODE_WRITE) {
+               const int size_bits = FTRACE_HASH_DEFAULT_BITS;
+
+               if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
+                       return -ENOMEM;
+
+               if (file->f_flags & O_TRUNC)
+                       new_hash = alloc_ftrace_hash(size_bits);
+               else
+                       new_hash = alloc_and_copy_ftrace_hash(size_bits,
+                                                             fgd->hash);
+               if (!new_hash) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
        }
-       mutex_unlock(&graph_lock);
 
        if (file->f_mode & FMODE_READ) {
-               ret = seq_open(file, fgd->seq_ops);
+               ret = seq_open(file, &ftrace_graph_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = fgd;
+               } else {
+                       /* Failed */
+                       free_ftrace_hash(new_hash);
+                       new_hash = NULL;
                }
        } else
                file->private_data = fgd;
 
+out:
+       if (ret < 0 && file->f_mode & FMODE_WRITE)
+               trace_parser_put(&fgd->parser);
+
+       fgd->new_hash = new_hash;
+
+       /*
+        * All uses of fgd->hash must be taken with the graph_lock
+        * held. The graph_lock is going to be released, so force
+        * fgd->hash to be reinitialized when it is taken again.
+        */
+       fgd->hash = NULL;
+
        return ret;
 }
 
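
[Aside, not part of the patch: the FMODE_WRITE branch of __ftrace_graph_open() above, condensed. O_TRUNC starts from an empty hash; otherwise edits are staged on a private copy of the current hash until release time. example_prepare_hash() is hypothetical.]

static struct ftrace_hash *example_prepare_hash(struct ftrace_hash *cur,
                                                unsigned int f_flags)
{
        if (f_flags & O_TRUNC)
                return alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);

        return alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, cur);
}
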
@@ -4646,6 +4738,7 @@ static int
 ftrace_graph_open(struct inode *inode, struct file *file)
 {
        struct ftrace_graph_data *fgd;
+       int ret;
 
        if (unlikely(ftrace_disabled))
                return -ENODEV;
@@ -4654,18 +4747,26 @@ ftrace_graph_open(struct inode *inode, struct file *file)
        if (fgd == NULL)
                return -ENOMEM;
 
-       fgd->table = ftrace_graph_funcs;
-       fgd->size = FTRACE_GRAPH_MAX_FUNCS;
-       fgd->count = &ftrace_graph_count;
+       mutex_lock(&graph_lock);
+
+       fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
+                                       lockdep_is_held(&graph_lock));
+       fgd->type = GRAPH_FILTER_FUNCTION;
        fgd->seq_ops = &ftrace_graph_seq_ops;
 
-       return __ftrace_graph_open(inode, file, fgd);
+       ret = __ftrace_graph_open(inode, file, fgd);
+       if (ret < 0)
+               kfree(fgd);
+
+       mutex_unlock(&graph_lock);
+       return ret;
 }
 
 static int
 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
 {
        struct ftrace_graph_data *fgd;
+       int ret;
 
        if (unlikely(ftrace_disabled))
                return -ENODEV;
@@ -4674,45 +4775,97 @@ ftrace_graph_notrace_open(struct inode *inode, struct file *file)
        if (fgd == NULL)
                return -ENOMEM;
 
-       fgd->table = ftrace_graph_notrace_funcs;
-       fgd->size = FTRACE_GRAPH_MAX_FUNCS;
-       fgd->count = &ftrace_graph_notrace_count;
+       mutex_lock(&graph_lock);
+
+       fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
+                                       lockdep_is_held(&graph_lock));
+       fgd->type = GRAPH_FILTER_NOTRACE;
        fgd->seq_ops = &ftrace_graph_seq_ops;
 
-       return __ftrace_graph_open(inode, file, fgd);
+       ret = __ftrace_graph_open(inode, file, fgd);
+       if (ret < 0)
+               kfree(fgd);
+
+       mutex_unlock(&graph_lock);
+       return ret;
 }
 
 static int
 ftrace_graph_release(struct inode *inode, struct file *file)
 {
+       struct ftrace_graph_data *fgd;
+       struct ftrace_hash *old_hash, *new_hash;
+       struct trace_parser *parser;
+       int ret = 0;
+
        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
 
-               kfree(m->private);
+               fgd = m->private;
                seq_release(inode, file);
        } else {
-               kfree(file->private_data);
+               fgd = file->private_data;
        }
 
-       return 0;
+
+       if (file->f_mode & FMODE_WRITE) {
+
+               parser = &fgd->parser;
+
+               if (trace_parser_loaded((parser))) {
+                       parser->buffer[parser->idx] = 0;
+                       ret = ftrace_graph_set_hash(fgd->new_hash,
+                                                   parser->buffer);
+               }
+
+               trace_parser_put(parser);
+
+               new_hash = __ftrace_hash_move(fgd->new_hash);
+               if (!new_hash) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               mutex_lock(&graph_lock);
+
+               if (fgd->type == GRAPH_FILTER_FUNCTION) {
+                       old_hash = rcu_dereference_protected(ftrace_graph_hash,
+                                       lockdep_is_held(&graph_lock));
+                       rcu_assign_pointer(ftrace_graph_hash, new_hash);
+               } else {
+                       old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
+                                       lockdep_is_held(&graph_lock));
+                       rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
+               }
+
+               mutex_unlock(&graph_lock);
+
+               /* Wait till all users are no longer using the old hash */
+               synchronize_sched();
+
+               free_ftrace_hash(old_hash);
+       }
+
+ out:
+       kfree(fgd->new_hash);
+       kfree(fgd);
+
+       return ret;
 }
 
 static int
-ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
+ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
 {
        struct ftrace_glob func_g;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
+       struct ftrace_func_entry *entry;
        int fail = 1;
        int not;
-       bool exists;
-       int i;
 
        /* decode regex */
        func_g.type = filter_parse_regex(buffer, strlen(buffer),
                                         &func_g.search, &not);
-       if (!not && *idx >= size)
-               return -EBUSY;
 
        func_g.len = strlen(func_g.search);
 
@@ -4729,26 +4882,18 @@ ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
                        continue;
 
                if (ftrace_match_record(rec, &func_g, NULL, 0)) {
-                       /* if it is in the array */
-                       exists = false;
-                       for (i = 0; i < *idx; i++) {
-                               if (array[i] == rec->ip) {
-                                       exists = true;
-                                       break;
-                               }
-                       }
+                       entry = ftrace_lookup_ip(hash, rec->ip);
 
                        if (!not) {
                                fail = 0;
-                               if (!exists) {
-                                       array[(*idx)++] = rec->ip;
-                                       if (*idx >= size)
-                                               goto out;
-                               }
+
+                               if (entry)
+                                       continue;
+                               if (add_hash_entry(hash, rec->ip) < 0)
+                                       goto out;
                        } else {
-                               if (exists) {
-                                       array[i] = array[--(*idx)];
-                                       array[*idx] = 0;
+                               if (entry) {
+                                       free_hash_entry(hash, entry);
                                        fail = 0;
                                }
                        }
@@ -4767,35 +4912,34 @@ static ssize_t
 ftrace_graph_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
 {
-       struct trace_parser parser;
        ssize_t read, ret = 0;
        struct ftrace_graph_data *fgd = file->private_data;
+       struct trace_parser *parser;
 
        if (!cnt)
                return 0;
 
-       if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
-               return -ENOMEM;
-
-       read = trace_get_user(&parser, ubuf, cnt, ppos);
+       /* Read mode uses seq functions */
+       if (file->f_mode & FMODE_READ) {
+               struct seq_file *m = file->private_data;
+               fgd = m->private;
+       }
 
-       if (read >= 0 && trace_parser_loaded((&parser))) {
-               parser.buffer[parser.idx] = 0;
+       parser = &fgd->parser;
 
-               mutex_lock(&graph_lock);
+       read = trace_get_user(parser, ubuf, cnt, ppos);
 
-               /* we allow only one expression at a time */
-               ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
-                                     parser.buffer);
+       if (read >= 0 && trace_parser_loaded(parser) &&
+           !trace_parser_cont(parser)) {
 
-               mutex_unlock(&graph_lock);
+               ret = ftrace_graph_set_hash(fgd->new_hash,
+                                           parser->buffer);
+               trace_parser_clear(parser);
        }
 
        if (!ret)
                ret = read;
 
-       trace_parser_put(&parser);
-
        return ret;
 }
 
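
[Aside, not part of the patch: a sketch of the read side that the rcu_assign_pointer()/synchronize_sched() pairing in ftrace_graph_release() above protects. Tracing callbacks run with preemption disabled, so sched-RCU is a sufficient grace period; example_ip_in_graph_hash() is hypothetical.]

static bool example_ip_in_graph_hash(unsigned long ip)
{
        struct ftrace_hash *hash;
        bool ret;

        preempt_disable_notrace();
        hash = rcu_dereference_sched(ftrace_graph_hash);
        ret = !ftrace_hash_empty(hash) && __ftrace_lookup_ip(hash, ip);
        preempt_enable_notrace();

        return ret;
}
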
@@ -5357,7 +5501,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
  * Normally the mcount trampoline will call the ops->func, but there
  * are times that it should not. For example, if the ops does not
  * have its own recursion protection, then it should call the
- * ftrace_ops_recurs_func() instead.
+ * ftrace_ops_assist_func() instead.
  *
  * Returns the function that the trampoline should call for @ops.
  */