[mirror_ubuntu-bionic-kernel.git] / kernel / kprobes.c
index 4641bcecd161b54138695e5113a49d7a7cefbb33..cd1d7d1ec87cf9d6c0c282dd341a0a5235d7d72b 100644
@@ -483,6 +483,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
  */
 static void do_optimize_kprobes(void)
 {
+       lockdep_assert_held(&text_mutex);
        /*
         * The optimization/unoptimization refers online_cpus via
         * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -500,9 +501,7 @@ static void do_optimize_kprobes(void)
            list_empty(&optimizing_list))
                return;
 
-       mutex_lock(&text_mutex);
        arch_optimize_kprobes(&optimizing_list);
-       mutex_unlock(&text_mutex);
 }
 
 /*
@@ -513,6 +512,7 @@ static void do_unoptimize_kprobes(void)
 {
        struct optimized_kprobe *op, *tmp;
 
+       lockdep_assert_held(&text_mutex);
        /* See comment in do_optimize_kprobes() */
        lockdep_assert_cpus_held();
 
@@ -520,7 +520,6 @@ static void do_unoptimize_kprobes(void)
        if (list_empty(&unoptimizing_list))
                return;
 
-       mutex_lock(&text_mutex);
        arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
        /* Loop free_list for disarming */
        list_for_each_entry_safe(op, tmp, &freeing_list, list) {
@@ -537,7 +536,6 @@ static void do_unoptimize_kprobes(void)
                } else
                        list_del_init(&op->list);
        }
-       mutex_unlock(&text_mutex);
 }
 
 /* Reclaim all kprobes on the free_list */
@@ -546,8 +544,14 @@ static void do_free_cleaned_kprobes(void)
        struct optimized_kprobe *op, *tmp;
 
        list_for_each_entry_safe(op, tmp, &freeing_list, list) {
-               BUG_ON(!kprobe_unused(&op->kp));
                list_del_init(&op->list);
+               if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
+                       /*
+                        * This must not happen, but if there is a kprobe
+                        * still in use, keep it on kprobes hash list.
+                        */
+                       continue;
+               }
                free_aggr_kprobe(&op->kp);
        }
 }
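
This hunk downgrades a fatal assertion to a recoverable one: if an aggregated kprobe on freeing_list unexpectedly turns out to be still in use, do_free_cleaned_kprobes() now warns once and leaks the probe (it stays on the kprobe hash table, per the new comment) instead of calling BUG(). Note that list_del_init() also moves ahead of the check, so the entry leaves freeing_list either way. For reference, the predicate being tested, kprobe_unused(), is defined earlier in this file and reads essentially as follows (quoted from the same tree, not part of this diff):

	/* True when an aggregated probe has no users left: it is marked
	 * disabled and no child probes remain attached to its list. */
	static inline int kprobe_unused(struct kprobe *p)
	{
		return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
		       list_empty(&p->list);
	}
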
@@ -563,6 +567,7 @@ static void kprobe_optimizer(struct work_struct *work)
 {
        mutex_lock(&kprobe_mutex);
        cpus_read_lock();
+       mutex_lock(&text_mutex);
        /* Lock modules while optimizing kprobes */
        mutex_lock(&module_mutex);
 
@@ -590,6 +595,7 @@ static void kprobe_optimizer(struct work_struct *work)
        do_free_cleaned_kprobes();
 
        mutex_unlock(&module_mutex);
+       mutex_unlock(&text_mutex);
        cpus_read_unlock();
        mutex_unlock(&kprobe_mutex);
 
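
Taken together, the text_mutex hunks hoist the lock out of the two helpers and into their only caller: kprobe_optimizer() now takes text_mutex once, nested inside cpus_read_lock() and outside module_mutex, while do_optimize_kprobes() and do_unoptimize_kprobes() merely assert that it is held. A condensed sketch of the resulting nesting (the synchronization step between unoptimizing and optimizing, and the work itself, elided):

	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	mutex_lock(&text_mutex);	/* now taken under cpus_read_lock() */
	mutex_lock(&module_mutex);

	do_unoptimize_kprobes();	/* lockdep_assert_held(&text_mutex) */
	do_optimize_kprobes();		/* likewise */
	do_free_cleaned_kprobes();

	mutex_unlock(&module_mutex);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

This gives one fixed lock order for the whole optimizer pass instead of a separate acquire/release of text_mutex inside each step.
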
@@ -1499,7 +1505,8 @@ static int check_kprobe_address_safe(struct kprobe *p,
        /* Ensure it is not in reserved area nor out of text */
        if (!kernel_text_address((unsigned long) p->addr) ||
            within_kprobe_blacklist((unsigned long) p->addr) ||
-           jump_label_text_reserved(p->addr, p->addr)) {
+           jump_label_text_reserved(p->addr, p->addr) ||
+           find_bug((unsigned long)p->addr)) {
                ret = -EINVAL;
                goto out;
        }
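
The added find_bug() test (from lib/bug.c) refuses any probe address that has an entry in the __bug_table, i.e. the trapping instruction emitted for a BUG() or WARN() site: a kprobe breakpoint at that address would collide with the trap the bug handler expects to decode there. A hypothetical caller now gets the same -EINVAL as for blacklisted or jump-label-reserved addresses; in the sketch below, example_func and the no-op handler are placeholders:

	#include <linux/init.h>
	#include <linux/kprobes.h>

	static int pre(struct kprobe *p, struct pt_regs *regs)
	{
		return 0;	/* no-op pre-handler */
	}

	static struct kprobe kp = {
		.symbol_name	= "example_func",	/* assume it starts with WARN_ON() */
		.pre_handler	= pre,
	};

	static int __init probe_init(void)
	{
		int ret = register_kprobe(&kp);

		if (ret == -EINVAL)
			pr_err("address reserved: blacklist, jump label, or BUG/WARN site\n");
		return ret;
	}
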
@@ -2142,6 +2149,47 @@ void dump_kprobe(struct kprobe *kp)
 }
 NOKPROBE_SYMBOL(dump_kprobe);
 
+int kprobe_add_ksym_blacklist(unsigned long entry)
+{
+       struct kprobe_blacklist_entry *ent;
+       unsigned long offset = 0, size = 0;
+
+       if (!kernel_text_address(entry) ||
+           !kallsyms_lookup_size_offset(entry, &size, &offset))
+               return -EINVAL;
+
+       ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+       if (!ent)
+               return -ENOMEM;
+       ent->start_addr = entry;
+       ent->end_addr = entry + size;
+       INIT_LIST_HEAD(&ent->list);
+       list_add_tail(&ent->list, &kprobe_blacklist);
+
+       return (int)size;
+}
+
+/* Add all symbols in given area into kprobe blacklist */
+int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
+{
+       unsigned long entry;
+       int ret = 0;
+
+       for (entry = start; entry < end; entry += ret) {
+               ret = kprobe_add_ksym_blacklist(entry);
+               if (ret < 0)
+                       return ret;
+               if (ret == 0)   /* In case of alias symbol */
+                       ret = 1;
+       }
+       return 0;
+}
+
+int __init __weak arch_populate_kprobe_blacklist(void)
+{
+       return 0;
+}
+
 /*
  * Lookup and populate the kprobe_blacklist.
  *
@@ -2153,26 +2201,24 @@ NOKPROBE_SYMBOL(dump_kprobe);
 static int __init populate_kprobe_blacklist(unsigned long *start,
                                             unsigned long *end)
 {
+       unsigned long entry;
        unsigned long *iter;
-       struct kprobe_blacklist_entry *ent;
-       unsigned long entry, offset = 0, size = 0;
+       int ret;
 
        for (iter = start; iter < end; iter++) {
                entry = arch_deref_entry_point((void *)*iter);
-
-               if (!kernel_text_address(entry) ||
-                   !kallsyms_lookup_size_offset(entry, &size, &offset))
+               ret = kprobe_add_ksym_blacklist(entry);
+               if (ret == -EINVAL)
                        continue;
-
-               ent = kmalloc(sizeof(*ent), GFP_KERNEL);
-               if (!ent)
-                       return -ENOMEM;
-               ent->start_addr = entry;
-               ent->end_addr = entry + size;
-               INIT_LIST_HEAD(&ent->list);
-               list_add_tail(&ent->list, &kprobe_blacklist);
+               if (ret < 0)
+                       return ret;
        }
-       return 0;
+
+       /* Symbols in __kprobes_text are blacklisted */
+       ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
+                                       (unsigned long)__kprobes_text_end);
+
+       return ret ? : arch_populate_kprobe_blacklist();
 }
 
 /* Module notifier call back, checking kprobes on the module */
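
The last two hunks refactor how the blacklist is built. kprobe_add_ksym_blacklist() covers a single symbol and returns its size on success so callers can step to the next one; kprobe_add_area_blacklist() walks an address range symbol by symbol, bumping zero-size alias symbols to a one-byte step so the loop always advances; populate_kprobe_blacklist() now also blacklists every symbol inside [__kprobes_text_start, __kprobes_text_end); and the weak arch_populate_kprobe_blacklist() stub lets architectures append their own prohibited ranges (the trailing "ret ? : ..." conditional, a GNU extension, returns the first error and otherwise the arch hook's result). An override might look like the following sketch, which is illustrative and not part of this diff; the entry-text markers are the generic linker-script symbols from <asm/sections.h>:

	#include <linux/kprobes.h>
	#include <asm/sections.h>

	/* Hypothetical arch hook: blacklist every symbol in the exception
	 * entry text, where taking a kprobe trap would be unsafe. */
	int __init arch_populate_kprobe_blacklist(void)
	{
		return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
						 (unsigned long)__entry_text_end);
	}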