kprobes: Fix optimize_kprobe()/unoptimize_kprobe() cancellation logic
author Masami Hiramatsu <mhiramat@kernel.org>
Tue, 7 Jan 2020 14:42:24 +0000 (23:42 +0900)
committer Ingo Molnar <mingo@kernel.org>
Thu, 9 Jan 2020 11:40:13 +0000 (12:40 +0100)
optimize_kprobe() and unoptimize_kprobe() cancel if a given kprobe
is already on the optimizing_list or unoptimizing_list. However, since
the following commit:

  f66c0447cca1 ("kprobes: Set unoptimized flag after unoptimizing code")

modified the update timing of the KPROBE_FLAG_OPTIMIZED flag, this
cancellation no longer works as expected.

An optimized_kprobe can be in one of the following states:

- [optimizing]: Before inserting jump instruction
  op.kp->flags has KPROBE_FLAG_OPTIMIZED and
  op->list is not empty.

- [optimized]: jump inserted
  op.kp->flags has KPROBE_FLAG_OPTIMIZED and
  op->list is empty.

- [unoptimizing]: Before removing jump instruction (including unused
  optprobe)
  op.kp->flags has KPROBE_FLAG_OPTIMIZED and
  op->list is not empty.

- [unoptimized]: jump removed
  op.kp->flags doesn't have KPROBE_FLAG_OPTIMIZED and
  op->list is empty.

The current code wrongly assumes that the [unoptimizing] state does not
have KPROBE_FLAG_OPTIMIZED set, which can lead to incorrect results.

To fix this, introduce optprobe_queued_unopt() to distinguish the
[optimizing] and [unoptimizing] states, and fix the logic in
optimize_kprobe() and unoptimize_kprobe() accordingly.
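
The distinction can be summarized as follows. This is an illustrative
sketch only, not part of the patch: the enum and the kprobe_state()
helper are made-up names for the example, while optprobe_queued_unopt()
is the helper actually introduced below:

  enum optprobe_state {
          OP_OPTIMIZING, OP_OPTIMIZED, OP_UNOPTIMIZING, OP_UNOPTIMIZED
  };

  static enum optprobe_state kprobe_state(struct optimized_kprobe *op)
  {
          if (!(op->kp.flags & KPROBE_FLAG_OPTIMIZED))
                  return OP_UNOPTIMIZED;  /* [unoptimized]: jump removed */
          if (list_empty(&op->list))
                  return OP_OPTIMIZED;    /* [optimized]: jump inserted */
          /* Queued: the flag alone cannot tell which queue, check the list */
          return optprobe_queued_unopt(op) ? OP_UNOPTIMIZING : OP_OPTIMIZING;
  }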

[ mingo: Cleaned up the changelog and the code a bit. ]

Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bristot@redhat.com
Fixes: f66c0447cca1 ("kprobes: Set unoptimized flag after unoptimizing code")
Link: https://lkml.kernel.org/r/157840814418.7181.13478003006386303481.stgit@devnote2
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/kprobes.c

index 34e28b236d680af9db047c64cbd68fca56841367..2625c241ac00f81d5385be6024b9b3062c05ecfd 100644
@@ -612,6 +612,18 @@ void wait_for_kprobe_optimizer(void)
        mutex_unlock(&kprobe_mutex);
 }
 
+static bool optprobe_queued_unopt(struct optimized_kprobe *op)
+{
+       struct optimized_kprobe *_op;
+
+       list_for_each_entry(_op, &unoptimizing_list, list) {
+               if (op == _op)
+                       return true;
+       }
+
+       return false;
+}
+
 /* Optimize kprobe if p is ready to be optimized */
 static void optimize_kprobe(struct kprobe *p)
 {
@@ -633,17 +645,21 @@ static void optimize_kprobe(struct kprobe *p)
                return;
 
        /* Check if it is already optimized. */
-       if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
+       if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
+               if (optprobe_queued_unopt(op)) {
+                       /* This is under unoptimizing. Just dequeue the probe */
+                       list_del_init(&op->list);
+               }
                return;
+       }
        op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
 
-       if (!list_empty(&op->list))
-               /* This is under unoptimizing. Just dequeue the probe */
-               list_del_init(&op->list);
-       else {
-               list_add(&op->list, &optimizing_list);
-               kick_kprobe_optimizer();
-       }
+       /* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
+       if (WARN_ON_ONCE(!list_empty(&op->list)))
+               return;
+
+       list_add(&op->list, &optimizing_list);
+       kick_kprobe_optimizer();
 }
 
 /* Short cut to direct unoptimizing */
@@ -665,30 +681,33 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
                return; /* This is not an optprobe nor optimized */
 
        op = container_of(p, struct optimized_kprobe, kp);
-       if (!kprobe_optimized(p)) {
-               /* Unoptimized or unoptimizing case */
-               if (force && !list_empty(&op->list)) {
-                       /*
-                        * Only if this is unoptimizing kprobe and forced,
-                        * forcibly unoptimize it. (No need to unoptimize
-                        * unoptimized kprobe again :)
-                        */
-                       list_del_init(&op->list);
-                       force_unoptimize_kprobe(op);
-               }
+       if (!kprobe_optimized(p))
                return;
-       }
 
        if (!list_empty(&op->list)) {
-               /* Dequeue from the optimization queue */
-               list_del_init(&op->list);
+               if (optprobe_queued_unopt(op)) {
+                       /* Queued in unoptimizing queue */
+                       if (force) {
+                               /*
+                                * Forcibly unoptimize the kprobe here, and queue it
+                                * in the freeing list for release afterwards.
+                                */
+                               force_unoptimize_kprobe(op);
+                               list_move(&op->list, &freeing_list);
+                       }
+               } else {
+                       /* Dequeue from the optimizing queue */
+                       list_del_init(&op->list);
+                       op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+               }
                return;
        }
+
        /* Optimized kprobe case */
-       if (force)
+       if (force) {
                /* Forcibly update the code: this is a special case */
                force_unoptimize_kprobe(op);
-       else {
+       } else {
                list_add(&op->list, &unoptimizing_list);
                kick_kprobe_optimizer();
        }
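
For readability, this is how the unoptimize_kprobe() decision flow reads
with the hunks above applied. It is a reassembled sketch, not an
additional change; the unchanged early checks and unrelated details are
elided, and the bracketed labels refer to the states listed in the
changelog:

  static void unoptimize_kprobe(struct kprobe *p, bool force)
  {
          struct optimized_kprobe *op;

          /* (unchanged early checks elided) */

          op = container_of(p, struct optimized_kprobe, kp);
          if (!kprobe_optimized(p))
                  return;         /* [unoptimized]: nothing to do */

          if (!list_empty(&op->list)) {
                  if (optprobe_queued_unopt(op)) {
                          /* [unoptimizing]: queued in the unoptimizing queue */
                          if (force) {
                                  /* Forcibly unoptimize and queue for release */
                                  force_unoptimize_kprobe(op);
                                  list_move(&op->list, &freeing_list);
                          }
                  } else {
                          /* [optimizing]: dequeue from the optimizing queue */
                          list_del_init(&op->list);
                          op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
                  }
                  return;
          }

          /* [optimized]: the jump is in place */
          if (force) {
                  /* Forcibly update the code: this is a special case */
                  force_unoptimize_kprobe(op);
          } else {
                  list_add(&op->list, &unoptimizing_list);
                  kick_kprobe_optimizer();
          }
  }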