diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index da2ccf14235814df4dc4b21509500d0d777cacb4..6ede02bb05ac406090301aceeadc63cf1e6425a7 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -483,6 +483,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
  */
 static void do_optimize_kprobes(void)
 {
+       lockdep_assert_held(&text_mutex);
        /*
         * The optimization/unoptimization refers online_cpus via
         * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -500,9 +501,7 @@ static void do_optimize_kprobes(void)
            list_empty(&optimizing_list))
                return;
 
-       mutex_lock(&text_mutex);
        arch_optimize_kprobes(&optimizing_list);
-       mutex_unlock(&text_mutex);
 }
 
 /*
@@ -513,6 +512,7 @@ static void do_unoptimize_kprobes(void)
 {
        struct optimized_kprobe *op, *tmp;
 
+       lockdep_assert_held(&text_mutex);
        /* See comment in do_optimize_kprobes() */
        lockdep_assert_cpus_held();
 
@@ -520,7 +520,6 @@ static void do_unoptimize_kprobes(void)
        if (list_empty(&unoptimizing_list))
                return;
 
-       mutex_lock(&text_mutex);
        arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
        /* Loop free_list for disarming */
        list_for_each_entry_safe(op, tmp, &freeing_list, list) {
@@ -537,7 +536,6 @@ static void do_unoptimize_kprobes(void)
                } else
                        list_del_init(&op->list);
        }
-       mutex_unlock(&text_mutex);
 }
 
 /* Reclaim all kprobes on the free_list */
@@ -546,8 +544,14 @@ static void do_free_cleaned_kprobes(void)
        struct optimized_kprobe *op, *tmp;
 
        list_for_each_entry_safe(op, tmp, &freeing_list, list) {
-               BUG_ON(!kprobe_unused(&op->kp));
                list_del_init(&op->list);
+               if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
+                       /*
+                        * This must not happen, but if there is a kprobe
+                        * still in use, keep it on kprobes hash list.
+                        */
+                       continue;
+               }
                free_aggr_kprobe(&op->kp);
        }
 }
@@ -563,6 +567,7 @@ static void kprobe_optimizer(struct work_struct *work)
 {
        mutex_lock(&kprobe_mutex);
        cpus_read_lock();
+       mutex_lock(&text_mutex);
        /* Lock modules while optimizing kprobes */
        mutex_lock(&module_mutex);
 
@@ -590,6 +595,7 @@ static void kprobe_optimizer(struct work_struct *work)
        do_free_cleaned_kprobes();
 
        mutex_unlock(&module_mutex);
+       mutex_unlock(&text_mutex);
        cpus_read_unlock();
        mutex_unlock(&kprobe_mutex);
 
@@ -700,7 +706,7 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 }
 
 /* Cancel unoptimizing for reusing */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
 {
        struct optimized_kprobe *op;
 
@@ -710,14 +716,15 @@ static void reuse_unused_kprobe(struct kprobe *ap)
         * there is still a relative jump) and disabled.
         */
        op = container_of(ap, struct optimized_kprobe, kp);
-       if (unlikely(list_empty(&op->list)))
-               printk(KERN_WARNING "Warning: found a stray unused "
-                       "aggrprobe@%p\n", ap->addr);
+       WARN_ON_ONCE(list_empty(&op->list));
        /* Enable the probe again */
        ap->flags &= ~KPROBE_FLAG_DISABLED;
        /* Optimize it again (remove from op->list) */
-       BUG_ON(!kprobe_optready(ap));
+       if (!kprobe_optready(ap))
+               return -EINVAL;
+
        optimize_kprobe(ap);
+       return 0;
 }
 
 /* Remove optimized instructions */
@@ -942,11 +949,16 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
 #define kprobe_disarmed(p)                     kprobe_disabled(p)
 #define wait_for_kprobe_optimizer()            do {} while (0)
 
-/* There should be no unused kprobes can be reused without optimization */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
 {
+       /*
+        * If the optimized kprobe is NOT supported, the aggr kprobe is
+        * released at the same time that the last aggregated kprobe is
+        * unregistered.
+        * Thus there should be no chance to reuse unused kprobe.
+        */
        printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
-       BUG_ON(kprobe_unused(ap));
+       return -EINVAL;
 }
 
 static void free_aggr_kprobe(struct kprobe *p)
@@ -984,7 +996,7 @@ static void arm_kprobe_ftrace(struct kprobe *p)
 
        ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
                                   (unsigned long)p->addr, 0, 0);
-       WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+       WARN(ret < 0, "Failed to arm kprobe-ftrace at %pS (%d)\n", p->addr, ret);
        kprobe_ftrace_enabled++;
        if (kprobe_ftrace_enabled == 1) {
                ret = register_ftrace_function(&kprobe_ftrace_ops);
@@ -1004,7 +1016,7 @@ static void disarm_kprobe_ftrace(struct kprobe *p)
        }
        ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
                           (unsigned long)p->addr, 1, 0);
-       WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+       WARN(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n", p->addr, ret);
 }
 #else  /* !CONFIG_KPROBES_ON_FTRACE */
 #define prepare_kprobe(p)      arch_prepare_kprobe(p)
@@ -1320,9 +1332,12 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
                        goto out;
                }
                init_aggr_kprobe(ap, orig_p);
-       } else if (kprobe_unused(ap))
+       } else if (kprobe_unused(ap)) {
                /* This probe is going to die. Rescue it */
-               reuse_unused_kprobe(ap);
+               ret = reuse_unused_kprobe(ap);
+               if (ret)
+                       goto out;
+       }
 
        if (kprobe_gone(ap)) {
                /*
@@ -1490,7 +1505,8 @@ static int check_kprobe_address_safe(struct kprobe *p,
        /* Ensure it is not in reserved area nor out of text */
        if (!kernel_text_address((unsigned long) p->addr) ||
            within_kprobe_blacklist((unsigned long) p->addr) ||
-           jump_label_text_reserved(p->addr, p->addr)) {
+           jump_label_text_reserved(p->addr, p->addr) ||
+           find_bug((unsigned long)p->addr)) {
                ret = -EINVAL;
                goto out;
        }
@@ -2124,11 +2140,12 @@ out:
 }
 EXPORT_SYMBOL_GPL(enable_kprobe);
 
+/* Caller must NOT call this in usual path. This is only for critical case */
 void dump_kprobe(struct kprobe *kp)
 {
-       printk(KERN_WARNING "Dumping kprobe:\n");
-       printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
-              kp->symbol_name, kp->addr, kp->offset);
+       pr_err("Dumping kprobe:\n");
+       pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
+              kp->symbol_name, kp->offset, kp->addr);
 }
 NOKPROBE_SYMBOL(dump_kprobe);
 
@@ -2151,11 +2168,8 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
                entry = arch_deref_entry_point((void *)*iter);
 
                if (!kernel_text_address(entry) ||
-                   !kallsyms_lookup_size_offset(entry, &size, &offset)) {
-                       pr_err("Failed to find blacklist at %p\n",
-                               (void *)entry);
+                   !kallsyms_lookup_size_offset(entry, &size, &offset))
                        continue;
-               }
 
                ent = kmalloc(sizeof(*ent), GFP_KERNEL);
                if (!ent)
@@ -2383,8 +2397,16 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
        struct kprobe_blacklist_entry *ent =
                list_entry(v, struct kprobe_blacklist_entry, list);
 
-       seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr,
-                  (void *)ent->end_addr, (void *)ent->start_addr);
+       /*
+        * If /proc/kallsyms is not showing kernel address, we won't
+        * show them here either.
+        */
+       if (!kallsyms_show_value())
+               seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
+                          (void *)ent->start_addr);
+       else
+               seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
+                          (void *)ent->end_addr, (void *)ent->start_addr);
        return 0;
 }
 
@@ -2533,7 +2555,7 @@ static int __init debugfs_kprobe_init(void)
        if (!dir)
                return -ENOMEM;
 
-       file = debugfs_create_file("list", 0444, dir, NULL,
+       file = debugfs_create_file("list", 0400, dir, NULL,
                                &debugfs_kprobes_operations);
        if (!file)
                goto error;
@@ -2543,7 +2565,7 @@ static int __init debugfs_kprobe_init(void)
        if (!file)
                goto error;
 
-       file = debugfs_create_file("blacklist", 0444, dir, NULL,
+       file = debugfs_create_file("blacklist", 0400, dir, NULL,
                                &debugfs_kprobe_blacklist_ops);
        if (!file)
                goto error;