git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
bpf: move sleepable flag from bpf_prog_aux to bpf_prog
author    Andrii Nakryiko <andrii@kernel.org>
          Sat, 9 Mar 2024 00:47:39 +0000 (16:47 -0800)
committer Alexei Starovoitov <ast@kernel.org>
          Mon, 11 Mar 2024 23:41:25 +0000 (16:41 -0700)
prog->aux->sleepable is checked very frequently as part of (some) BPF
program run hot paths. So this extra aux indirection seems wasteful and
on busy systems might cause unnecessary memory cache misses.

Let's move sleepable flag into prog itself to eliminate unnecessary
pointer dereference.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Message-ID: <20240309004739.2961431-1-andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf.h
kernel/bpf/bpf_iter.c
kernel/bpf/core.c
kernel/bpf/syscall.c
kernel/bpf/trampoline.c
kernel/bpf/verifier.c
kernel/events/core.c
kernel/trace/bpf_trace.c
net/bpf/bpf_dummy_struct_ops.c

index 08ad265cb195979903f4f200193672cdf5214517..4f20f62f9d63da87800af4ac21cbc7c92dae5fb9 100644 (file)
@@ -1455,7 +1455,6 @@ struct bpf_prog_aux {
        bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
        bool attach_tracing_prog; /* true if tracing another tracing program */
        bool func_proto_unreliable;
-       bool sleepable;
        bool tail_call_reachable;
        bool xdp_has_frags;
        bool exception_cb;
@@ -1541,7 +1540,8 @@ struct bpf_prog {
                                enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
                                call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
                                call_get_func_ip:1, /* Do we call get_func_ip() */
-                               tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */
+                               tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */
+                               sleepable:1;    /* BPF program is sleepable */
        enum bpf_prog_type      type;           /* Type of BPF program */
        enum bpf_attach_type    expected_attach_type; /* For some prog types */
        u32                     len;            /* Number of filter blocks */
@@ -2112,14 +2112,14 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
        old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
        item = &array->items[0];
        while ((prog = READ_ONCE(item->prog))) {
-               if (!prog->aux->sleepable)
+               if (!prog->sleepable)
                        rcu_read_lock();
 
                run_ctx.bpf_cookie = item->bpf_cookie;
                ret &= run_prog(prog, ctx);
                item++;
 
-               if (!prog->aux->sleepable)
+               if (!prog->sleepable)
                        rcu_read_unlock();
        }
        bpf_reset_run_ctx(old_run_ctx);
index 0fae79164187094d77def3bdc9cd6e6cac99c733..112581cf97e7fe913e550ee2b19adb45da28df00 100644 (file)
@@ -548,7 +548,7 @@ int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
                return -ENOENT;
 
        /* Only allow sleepable program for resched-able iterator */
-       if (prog->aux->sleepable && !bpf_iter_target_support_resched(tinfo))
+       if (prog->sleepable && !bpf_iter_target_support_resched(tinfo))
                return -EINVAL;
 
        link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN);
@@ -697,7 +697,7 @@ int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx)
        struct bpf_run_ctx run_ctx, *old_run_ctx;
        int ret;
 
-       if (prog->aux->sleepable) {
+       if (prog->sleepable) {
                rcu_read_lock_trace();
                migrate_disable();
                might_fault();
index 297184baeb2d4052c726bf1e0435e7f798aabccf..696bc55de8e82ea9358ede9c222b4927871e60be 100644 (file)
@@ -2706,7 +2706,7 @@ void __bpf_free_used_maps(struct bpf_prog_aux *aux,
        bool sleepable;
        u32 i;
 
-       sleepable = aux->sleepable;
+       sleepable = aux->prog->sleepable;
        for (i = 0; i < len; i++) {
                map = used_maps[i];
                if (map->ops->map_poke_untrack)
index 07f2a4db4511c8f9ffeeec4798e5ed3532eff54b..ae2ff73bde7e79aa905f060f753bfc8261451972 100644 (file)
@@ -2248,7 +2248,7 @@ static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
                btf_put(prog->aux->attach_btf);
 
        if (deferred) {
-               if (prog->aux->sleepable)
+               if (prog->sleepable)
                        call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
                else
                        call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
@@ -2813,11 +2813,11 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
        }
 
        prog->expected_attach_type = attr->expected_attach_type;
+       prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE);
        prog->aux->attach_btf = attach_btf;
        prog->aux->attach_btf_id = attr->attach_btf_id;
        prog->aux->dst_prog = dst_prog;
        prog->aux->dev_bound = !!attr->prog_ifindex;
-       prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
        prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
 
        /* move token into prog->aux, reuse taken refcnt */
@@ -5554,7 +5554,7 @@ static int bpf_prog_bind_map(union bpf_attr *attr)
        /* The bpf program will not access the bpf map, but for the sake of
         * simplicity, increase sleepable_refcnt for sleepable program as well.
         */
-       if (prog->aux->sleepable)
+       if (prog->sleepable)
                atomic64_inc(&map->sleepable_refcnt);
        memcpy(used_maps_new, used_maps_old,
               sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
index d382f5ebe06c8f02be96679d2277508e0af0644f..db7599c59c78a66f1b85ef969ba64be084edd181 100644 (file)
@@ -1014,7 +1014,7 @@ void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
 
 bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog)
 {
-       bool sleepable = prog->aux->sleepable;
+       bool sleepable = prog->sleepable;
 
        if (bpf_prog_check_recur(prog))
                return sleepable ? __bpf_prog_enter_sleepable_recur :
@@ -1029,7 +1029,7 @@ bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog)
 
 bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog)
 {
-       bool sleepable = prog->aux->sleepable;
+       bool sleepable = prog->sleepable;
 
        if (bpf_prog_check_recur(prog))
                return sleepable ? __bpf_prog_exit_sleepable_recur :
index d64f7a9b60e81e5387742fefc611a0ef89a9bf2b..d501c90f8ab517165f240e1ca84d6659563a849d 100644 (file)
@@ -5274,7 +5274,7 @@ bad_type:
 
 static bool in_sleepable(struct bpf_verifier_env *env)
 {
-       return env->prog->aux->sleepable;
+       return env->prog->sleepable;
 }
 
 /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
@@ -18137,7 +18137,7 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
                return -EINVAL;
        }
 
-       if (prog->aux->sleepable)
+       if (prog->sleepable)
                switch (map->map_type) {
                case BPF_MAP_TYPE_HASH:
                case BPF_MAP_TYPE_LRU_HASH:
@@ -18325,7 +18325,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
                                return -E2BIG;
                        }
 
-                       if (env->prog->aux->sleepable)
+                       if (env->prog->sleepable)
                                atomic64_inc(&map->sleepable_refcnt);
                        /* hold the map. If the program is rejected by verifier,
                         * the map will be released by release_maps() or it
@@ -20938,7 +20938,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
                        }
                }
 
-               if (prog->aux->sleepable) {
+               if (prog->sleepable) {
                        ret = -EINVAL;
                        switch (prog->type) {
                        case BPF_PROG_TYPE_TRACING:
@@ -21049,14 +21049,14 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
        u64 key;
 
        if (prog->type == BPF_PROG_TYPE_SYSCALL) {
-               if (prog->aux->sleepable)
+               if (prog->sleepable)
                        /* attach_btf_id checked to be zero already */
                        return 0;
                verbose(env, "Syscall programs can only be sleepable\n");
                return -EINVAL;
        }
 
-       if (prog->aux->sleepable && !can_be_sleepable(prog)) {
+       if (prog->sleepable && !can_be_sleepable(prog)) {
                verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n");
                return -EINVAL;
        }
index 5ecfa57e3b97f6983dfe413cc17d8c5db903faf5..724e6d7e128f3766f89791861c258fb317297216 100644 (file)
@@ -10553,7 +10553,7 @@ int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
            (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT))
                return -EINVAL;
 
-       if (prog->type == BPF_PROG_TYPE_KPROBE && prog->aux->sleepable && !is_uprobe)
+       if (prog->type == BPF_PROG_TYPE_KPROBE && prog->sleepable && !is_uprobe)
                /* only uprobe programs are allowed to be sleepable */
                return -EINVAL;
 
index 241ddf5e38953e2dc4c275ac8136b9611b25936b..0a5c4efc73c3674fa225757c6f4ccc921f758b57 100644 (file)
@@ -3256,7 +3256,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
                .uprobe = uprobe,
        };
        struct bpf_prog *prog = link->link.prog;
-       bool sleepable = prog->aux->sleepable;
+       bool sleepable = prog->sleepable;
        struct bpf_run_ctx *old_run_ctx;
        int err = 0;
 
index 1b5f812e6972c62d20eef7de66b69b9651d4c76e..de33dc1b0daadc0258950bee1adcc1cd99512143 100644 (file)
@@ -174,7 +174,7 @@ static int bpf_dummy_ops_check_member(const struct btf_type *t,
        case offsetof(struct bpf_dummy_ops, test_sleepable):
                break;
        default:
-               if (prog->aux->sleepable)
+               if (prog->sleepable)
                        return -EINVAL;
        }