bpf: Fail PERF_EVENT_IOC_SET_BPF when bpf_get_[stack|stackid] cannot work
Author:     Song Liu <songliubraving@fb.com>
AuthorDate: Thu, 23 Jul 2020 18:06:45 +0000 (11:06 -0700)
Commit:     Alexei Starovoitov <ast@kernel.org>
CommitDate: Sun, 26 Jul 2020 03:16:34 +0000 (20:16 -0700)
bpf_get_[stack|stackid] on perf_events with precise_ip uses the callchain
attached to perf_sample_data. If this callchain is not present, do not
allow attaching a BPF program that calls bpf_get_[stack|stackid] to the
event.

In the error case, -EPROTO is returned so that libbpf can identify this
error and print a proper hint message.

Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200723180648.1429892-3-songliubraving@fb.com
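
For reference, the kind of program this patch gates looks roughly like the
following sketch (not part of the patch; map and function names are
illustrative): a perf_event BPF program that samples stacks with
bpf_get_stackid().

#include <linux/bpf.h>
#include <linux/perf_event.h>
#include <linux/bpf_perf_event.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 1024);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(__u64));
} stackmap SEC(".maps");

SEC("perf_event")
int sample_stack(struct bpf_perf_event_data *ctx)
{
	/* Any call to bpf_get_stackid() or bpf_get_stack() marks the
	 * program (see the verifier hunk below), so attaching it to a
	 * precise_ip event without a full callchain now fails. */
	bpf_get_stackid(ctx, &stackmap, 0);
	return 0;
}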
include/linux/filter.h
kernel/bpf/verifier.c
kernel/events/core.c

diff --git a/include/linux/filter.h b/include/linux/filter.h
index d07a6e973a7d6f7770a93f12983e24b50e227a7b..0a355b005bf458dadcec078d3ca2a94e75054f0c 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -533,7 +533,8 @@ struct bpf_prog {
                                is_func:1,      /* program is a bpf function */
                                kprobe_override:1, /* Do we override a kprobe? */
                                has_callchain_buf:1, /* callchain buffer allocated? */
-                               enforce_expected_attach_type:1; /* Enforce expected_attach_type checking at attach time */
+                               enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
+                               call_get_stack:1; /* Do we call bpf_get_stack() or bpf_get_stackid() */
        enum bpf_prog_type      type;           /* Type of BPF program */
        enum bpf_attach_type    expected_attach_type; /* For some prog types */
        u32                     len;            /* Number of filter blocks */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8d6979db48d8ce30c93218aaedf9f3bf82e5e17f..cd14e70f2d074ada0697a78b3300d7cf6ad33edd 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4962,6 +4962,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
                env->prog->has_callchain_buf = true;
        }
 
+       if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
+               env->prog->call_get_stack = true;
+
        if (changes_data)
                clear_all_pkt_pointers(env);
        return 0;
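
The call_get_stack flag set here is recorded once at verification time and
consumed at attach time by perf_event_set_bpf_handler() below, so the new
check amounts to a few bit tests on the attach path and adds no per-sample
overhead.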
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 856d98c36f562d2bb15f056009c5b43004cb9e85..ddcfd2fb5cc5a997649a6e3cccde032f1368339b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -9544,6 +9544,24 @@ static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
        if (IS_ERR(prog))
                return PTR_ERR(prog);
 
+       if (event->attr.precise_ip &&
+           prog->call_get_stack &&
+           (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY) ||
+            event->attr.exclude_callchain_kernel ||
+            event->attr.exclude_callchain_user)) {
+               /*
+                * On perf_event with precise_ip, calling bpf_get_stack()
+                * may trigger unwinder warnings and occasional crashes.
+                * bpf_get_[stack|stackid] works around this issue by using
+                * callchain attached to perf_sample_data. If the
+                * perf_event does not have full (kernel and user) callchain
+                * attached to perf_sample_data, do not allow attaching BPF
+                * program that calls bpf_get_[stack|stackid].
+                */
+               bpf_prog_put(prog);
+               return -EPROTO;
+       }
+
        event->prog = prog;
        event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
        WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
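
To see the new failure mode from userspace, consider this hedged sketch
(not from the patch; prog_fd is assumed to be the fd of a loaded program
such as the one sketched above). A precise_ip event needs a full callchain,
i.e. PERF_SAMPLE_CALLCHAIN requested and exclude_callchain_{kernel,user}
left clear, or the ioctl now fails with EPROTO:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int attach_stack_prog(int prog_fd)
{
	struct perf_event_attr attr = {
		.size = sizeof(attr),
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.freq = 1,
		.sample_freq = 99,
		.precise_ip = 2,			/* precise (e.g. PEBS) event */
		.sample_type = PERF_SAMPLE_CALLCHAIN,	/* full callchain */
	};
	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

	if (fd < 0)
		return -errno;

	if (ioctl(fd, PERF_EVENT_IOC_SET_BPF, prog_fd)) {
		int err = -errno;

		if (err == -EPROTO)
			fprintf(stderr, "no full callchain on this event; "
				"add PERF_SAMPLE_CALLCHAIN or clear "
				"exclude_callchain_{kernel,user}\n");
		close(fd);
		return err;
	}
	return fd;	/* caller still needs PERF_EVENT_IOC_ENABLE */
}

This is also why -EPROTO is a reasonable choice: it is distinctive enough on
this path for libbpf to key a hint message off it, as the commit message
notes.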