]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
bpf/error-inject/kprobes: Clear current_kprobe and enable preempt in kprobe
authorMasami Hiramatsu <mhiramat@kernel.org>
Tue, 19 Jun 2018 16:15:45 +0000 (01:15 +0900)
committerIngo Molnar <mingo@kernel.org>
Thu, 21 Jun 2018 10:33:19 +0000 (12:33 +0200)
Clear current_kprobe and enable preemption in kprobe
even if pre_handler returns !0.

This simplifies function override using kprobes.

Jprobes used to require keeping preemption disabled and keeping
current_kprobe set until execution returned to the original
function entry. For this reason, kprobe_int3_handler() and similar
arch-dependent kprobe handlers check the pre_handler result
and exit without enabling preemption if the result is !0.

After the removal of jprobes, kprobes no longer needs to
keep preemption disabled when a user handler returns !0.

But since the function override handlers in error-inject
and bpf also return !0 when they override a function,
to balance the preempt count they enable preemption
and reset the current kprobe by themselves.

That is a fragile, bug-prone design. This patch fixes
such unbalanced preempt-count handling and current_kprobe
resetting in kprobes, bpf and error-inject.

Note: for powerpc and x86, this removes all preempt_disable
calls from kprobe_ftrace_handler, because ftrace callbacks are
already called with preemption disabled.

Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: Josef Bacik <jbacik@fb.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: linux-arch@vger.kernel.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-ia64@vger.kernel.org
Cc: linux-mips@linux-mips.org
Cc: linux-s390@vger.kernel.org
Cc: linux-sh@vger.kernel.org
Cc: linux-snps-arc@lists.infradead.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: sparclinux@vger.kernel.org
Link: https://lore.kernel.org/lkml/152942494574.15209.12323837825873032258.stgit@devbox
Signed-off-by: Ingo Molnar <mingo@kernel.org>
14 files changed:
arch/arc/kernel/kprobes.c
arch/arm/probes/kprobes/core.c
arch/arm64/kernel/probes/kprobes.c
arch/ia64/kernel/kprobes.c
arch/mips/kernel/kprobes.c
arch/powerpc/kernel/kprobes-ftrace.c
arch/powerpc/kernel/kprobes.c
arch/s390/kernel/kprobes.c
arch/sh/kernel/kprobes.c
arch/sparc/kernel/kprobes.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/kprobes/ftrace.c
kernel/fail_function.c
kernel/trace/trace_kprobe.c

index 465365696c919c090fb75319b5ce7f1006b9e3a0..df35d4c0b0b84f9490d70010384bde7f9ec164c8 100644 (file)
@@ -231,6 +231,9 @@ int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
                if (!p->pre_handler || !p->pre_handler(p, regs)) {
                        setup_singlestep(p, regs);
                        kcb->kprobe_status = KPROBE_HIT_SS;
+               } else {
+                       reset_current_kprobe();
+                       preempt_enable_no_resched();
                }
 
                return 1;
@@ -442,9 +445,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
        kretprobe_assert(ri, orig_ret_address, trampoline_address);
        regs->ret = orig_ret_address;
 
-       reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
-       preempt_enable_no_resched();
 
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
index 3192350f389d1ac228162c3a83de63d0102dad56..8d37601fdb20419a27a790e41bb9dcf115999f7c 100644 (file)
@@ -300,10 +300,10 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 
                        /*
                         * If we have no pre-handler or it returned 0, we
-                        * continue with normal processing.  If we have a
-                        * pre-handler and it returned non-zero, it prepped
-                        * for calling the break_handler below on re-entry,
-                        * so get out doing nothing more here.
+                        * continue with normal processing. If we have a
+                        * pre-handler and it returned non-zero, it will
+                        * modify the execution path and no need to single
+                        * stepping. Let's just reset current kprobe and exit.
                         */
                        if (!p->pre_handler || !p->pre_handler(p, regs)) {
                                kcb->kprobe_status = KPROBE_HIT_SS;
@@ -312,8 +312,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
                                        kcb->kprobe_status = KPROBE_HIT_SSDONE;
                                        p->post_handler(p, regs, 0);
                                }
-                               reset_current_kprobe();
                        }
+                       reset_current_kprobe();
                }
        } else {
                /*
index 076c3c0775a67f7542ad34cd9d29e1ab27ee88d3..5daf3d721cb72e1b06c53a7352130ff1145f5ad2 100644 (file)
@@ -395,9 +395,9 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
                        /*
                         * If we have no pre-handler or it returned 0, we
                         * continue with normal processing.  If we have a
-                        * pre-handler and it returned non-zero, it prepped
-                        * for calling the break_handler below on re-entry,
-                        * so get out doing nothing more here.
+                        * pre-handler and it returned non-zero, it will
+                        * modify the execution path and no need to single
+                        * stepping. Let's just reset current kprobe and exit.
                         *
                         * pre_handler can hit a breakpoint and can step thru
                         * before return, keep PSTATE D-flag enabled until
@@ -405,8 +405,8 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
                         */
                        if (!p->pre_handler || !p->pre_handler(p, regs)) {
                                setup_singlestep(p, regs, kcb, 0);
-                               return;
-                       }
+                       } else
+                               reset_current_kprobe();
                }
        }
        /*
index 74c8524e630905b5ddf99b23d0fde5c5d022e7bc..aa41bd5cf9b771ecda27e50a8acf7c3f0cf500fb 100644 (file)
@@ -478,12 +478,9 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
                         */
                        break;
        }
-
        kretprobe_assert(ri, orig_ret_address, trampoline_address);
 
-       reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
-       preempt_enable_no_resched();
 
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
@@ -851,13 +848,11 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
        set_current_kprobe(p, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 
-       if (p->pre_handler && p->pre_handler(p, regs))
-               /*
-                * Our pre-handler is specifically requesting that we just
-                * do a return.  This is used for both the jprobe pre-handler
-                * and the kretprobe trampoline
-                */
+       if (p->pre_handler && p->pre_handler(p, regs)) {
+               reset_current_kprobe();
+               preempt_enable_no_resched();
                return 1;
+       }
 
 #if !defined(CONFIG_PREEMPT)
        if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
index 7fd277bc59b944d6f327ae7544193d342dff8f5a..54cd675c5d1d474153f1d3c6b265bdc1c194b378 100644 (file)
@@ -358,6 +358,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 
        if (p->pre_handler && p->pre_handler(p, regs)) {
                /* handler has already set things up, so skip ss setup */
+               reset_current_kprobe();
+               preempt_enable_no_resched();
                return 1;
        }
 
@@ -543,9 +545,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
        kretprobe_assert(ri, orig_ret_address, trampoline_address);
        instruction_pointer(regs) = orig_ret_address;
 
-       reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
-       preempt_enable_no_resched();
 
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
index 070d1d862444337bfc4f3f461d4d591f95e7eb97..e4a49c051325d59ce026b2a415fde8c18f1263ce 100644 (file)
@@ -32,11 +32,9 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
        struct kprobe *p;
        struct kprobe_ctlblk *kcb;
 
-       preempt_disable();
-
        p = get_kprobe((kprobe_opcode_t *)nip);
        if (unlikely(!p) || kprobe_disabled(p))
-               goto end;
+               return;
 
        kcb = get_kprobe_ctlblk();
        if (kprobe_running()) {
@@ -60,18 +58,13 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
                                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                                p->post_handler(p, regs, 0);
                        }
-                       __this_cpu_write(current_kprobe, NULL);
-               } else {
-                       /*
-                        * If pre_handler returns !0, it sets regs->nip and
-                        * resets current kprobe. In this case, we should not
-                        * re-enable preemption.
-                        */
-                       return;
                }
+               /*
+                * If pre_handler returns !0, it changes regs->nip. We have to
+                * skip emulating post_handler.
+                */
+               __this_cpu_write(current_kprobe, NULL);
        }
-end:
-       preempt_enable_no_resched();
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
 
index f06747e2e70de6f019858ea8683e4743eb96126b..5c60bb0f927f819fea2d57aa98d7ad0a7eec5ebf 100644 (file)
@@ -358,9 +358,12 @@ int kprobe_handler(struct pt_regs *regs)
 
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
        set_current_kprobe(p, regs, kcb);
-       if (p->pre_handler && p->pre_handler(p, regs))
-               /* handler has already set things up, so skip ss setup */
+       if (p->pre_handler && p->pre_handler(p, regs)) {
+               /* handler changed execution path, so skip ss setup */
+               reset_current_kprobe();
+               preempt_enable_no_resched();
                return 1;
+       }
 
        if (p->ainsn.boostable >= 0) {
                ret = try_to_emulate(p, regs);
index 3e34018960b54caeeed80927087c4b0e192f66b4..7c0a095e9c5f6f3d483ceff698f7d7ffd06e614b 100644 (file)
@@ -326,8 +326,11 @@ static int kprobe_handler(struct pt_regs *regs)
                         */
                        push_kprobe(kcb, p);
                        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-                       if (p->pre_handler && p->pre_handler(p, regs))
+                       if (p->pre_handler && p->pre_handler(p, regs)) {
+                               pop_kprobe(kcb);
+                               preempt_enable_no_resched();
                                return 1;
+                       }
                        kcb->kprobe_status = KPROBE_HIT_SS;
                }
                enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
@@ -431,9 +434,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 
        regs->psw.addr = orig_ret_address;
 
-       pop_kprobe(get_kprobe_ctlblk());
        kretprobe_hash_unlock(current, &flags);
-       preempt_enable_no_resched();
 
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
index 4fafe0cd12c64f9a95d4e30589d9bc8d8de74ed0..241e903dd3ee224a7f05b2318854389afeb13a94 100644 (file)
@@ -272,9 +272,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
        set_current_kprobe(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 
-       if (p->pre_handler && p->pre_handler(p, regs))
+       if (p->pre_handler && p->pre_handler(p, regs)) {
                /* handler has already set things up, so skip ss setup */
+               reset_current_kprobe();
+               preempt_enable_no_resched();
                return 1;
+       }
 
        prepare_singlestep(p, regs);
        kcb->kprobe_status = KPROBE_HIT_SS;
@@ -352,8 +355,6 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
        regs->pc = orig_ret_address;
        kretprobe_hash_unlock(current, &flags);
 
-       preempt_enable_no_resched();
-
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
index c684c96ef2e98581a12ef9eee202f7d236b0513f..dfbca2470536eda4de3dac29e3a485549db74972 100644 (file)
@@ -175,8 +175,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 
        set_current_kprobe(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-       if (p->pre_handler && p->pre_handler(p, regs))
+       if (p->pre_handler && p->pre_handler(p, regs)) {
+               reset_current_kprobe();
+               preempt_enable_no_resched();
                return 1;
+       }
 
        prepare_singlestep(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_SS;
@@ -508,9 +511,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
        regs->tpc = orig_ret_address;
        regs->tnpc = orig_ret_address + 4;
 
-       reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
-       preempt_enable_no_resched();
 
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
index 0ac16a0d93e5dc2539b3c5fe8819e70ee3416d78..814e26b7c8a2fcf23ef7eb07930c219b0f9faa44 100644 (file)
@@ -694,6 +694,10 @@ int kprobe_int3_handler(struct pt_regs *regs)
                         */
                        if (!p->pre_handler || !p->pre_handler(p, regs))
                                setup_singlestep(p, regs, kcb, 0);
+                       else {
+                               reset_current_kprobe();
+                               preempt_enable_no_resched();
+                       }
                        return 1;
                }
        } else if (*addr != BREAKPOINT_INSTRUCTION) {
index 02a6dd1b6bd0d45fc87e6d8a2e82cb1266138015..ef819e19650bc8bb4c2d35eb9e9bd763ec7c9f5d 100644 (file)
@@ -45,8 +45,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
                regs->ip = ip + sizeof(kprobe_opcode_t);
 
-               /* To emulate trap based kprobes, preempt_disable here */
-               preempt_disable();
                __this_cpu_write(current_kprobe, p);
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;
                if (!p->pre_handler || !p->pre_handler(p, regs)) {
@@ -60,13 +58,12 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                                p->post_handler(p, regs, 0);
                        }
                        regs->ip = orig_ip;
-                       __this_cpu_write(current_kprobe, NULL);
-                       preempt_enable_no_resched();
                }
                /*
-                * If pre_handler returns !0, it sets regs->ip and
-                * resets current kprobe, and keep preempt count +1.
+                * If pre_handler returns !0, it changes regs->ip. We have to
+                * skip emulating post_handler.
                 */
+               __this_cpu_write(current_kprobe, NULL);
        }
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
index 5349c91c22983c4b9dc0d362aacf9579a705cf70..bc80a4e268c0bc27132ecb782df98276b282b237 100644 (file)
@@ -184,9 +184,6 @@ static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
        if (should_fail(&fei_fault_attr, 1)) {
                regs_set_return_value(regs, attr->retval);
                override_function_with_return(regs);
-               /* Kprobe specific fixup */
-               reset_current_kprobe();
-               preempt_enable_no_resched();
                return 1;
        }
 
index daa81571b22a4646bcc6400ccee0fe638dda2515..7e3b944b6ac1b491be43edd3b1d9352952f41bd2 100644 (file)
@@ -1217,16 +1217,11 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 
                /*
                 * We need to check and see if we modified the pc of the
-                * pt_regs, and if so clear the kprobe and return 1 so that we
-                * don't do the single stepping.
-                * The ftrace kprobe handler leaves it up to us to re-enable
-                * preemption here before returning if we've modified the ip.
+                * pt_regs, and if so return 1 so that we don't do the
+                * single stepping.
                 */
-               if (orig_ip != instruction_pointer(regs)) {
-                       reset_current_kprobe();
-                       preempt_enable_no_resched();
+               if (orig_ip != instruction_pointer(regs))
                        return 1;
-               }
                if (!ret)
                        return 0;
        }