bpf: add support for bpf_wq user type
author Benjamin Tissoires <bentiss@kernel.org>
Sat, 20 Apr 2024 09:09:05 +0000 (11:09 +0200)
committer Alexei Starovoitov <ast@kernel.org>
Wed, 24 Apr 2024 01:31:24 +0000 (18:31 -0700)
Mostly a copy/paste from the bpf_timer API, without the initialization
and free, as they will be done in a separate patch.

Signed-off-by: Benjamin Tissoires <bentiss@kernel.org>
Link: https://lore.kernel.org/r/20240420-bpf_wq-v2-5-6c986a5a741f@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
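
A minimal sketch of the BPF C shape this commit validates, assuming the usual vmlinux.h and libbpf conventions: only the embedded field declaration below is handled here, while the kfuncs that initialize and schedule the work come with the separate patches mentioned above.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

struct elem {
        struct bpf_wq work;     /* found via BTF as a BPF_WORKQUEUE field */
};

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, struct elem);
} array SEC(".maps");

char _license[] SEC("license") = "GPL";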
include/linux/bpf.h
include/uapi/linux/bpf.h
kernel/bpf/btf.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c

include/linux/bpf.h
index 5034c1b4ded7bfb3c9fe6fe209732e089ef99adf..c7dcfd395555a01d94902884f743d4300d0dd36b 100644
@@ -185,7 +185,7 @@ struct bpf_map_ops {
 
 enum {
-       /* Support at most 10 fields in a BTF type */
-       BTF_FIELDS_MAX     = 10,
+       /* Support at most 11 fields in a BTF type */
+       BTF_FIELDS_MAX     = 11,
 };
 
 enum btf_field_type {
@@ -202,6 +202,7 @@ enum btf_field_type {
        BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE,
        BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD,
        BPF_REFCOUNT   = (1 << 9),
+       BPF_WORKQUEUE  = (1 << 10),
 };
 
 typedef void (*btf_dtor_kfunc_t)(void *);
@@ -238,6 +239,7 @@ struct btf_record {
        u32 field_mask;
        int spin_lock_off;
        int timer_off;
+       int wq_off;
        int refcount_off;
        struct btf_field fields[];
 };
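
The new wq_off member mirrors timer_off: btf_parse_fields() (below, in btf.c) caches the byte offset of the single allowed bpf_wq so runtime code does not have to re-scan rec->fields[]. A hypothetical helper, not part of this patch, showing how such a cached offset can be consumed:

static struct bpf_wq *value_to_wq(const struct btf_record *rec, void *value)
{
        if (!btf_record_has_field(rec, BPF_WORKQUEUE))
                return NULL;
        return (struct bpf_wq *)(value + rec->wq_off);
}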
@@ -312,6 +314,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
                return "bpf_spin_lock";
        case BPF_TIMER:
                return "bpf_timer";
+       case BPF_WORKQUEUE:
+               return "bpf_wq";
        case BPF_KPTR_UNREF:
        case BPF_KPTR_REF:
                return "kptr";
@@ -340,6 +344,8 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
                return sizeof(struct bpf_spin_lock);
        case BPF_TIMER:
                return sizeof(struct bpf_timer);
+       case BPF_WORKQUEUE:
+               return sizeof(struct bpf_wq);
        case BPF_KPTR_UNREF:
        case BPF_KPTR_REF:
        case BPF_KPTR_PERCPU:
@@ -367,6 +373,8 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
                return __alignof__(struct bpf_spin_lock);
        case BPF_TIMER:
                return __alignof__(struct bpf_timer);
+       case BPF_WORKQUEUE:
+               return __alignof__(struct bpf_wq);
        case BPF_KPTR_UNREF:
        case BPF_KPTR_REF:
        case BPF_KPTR_PERCPU:
@@ -406,6 +414,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
                /* RB_ROOT_CACHED 0-inits, no need to do anything after memset */
        case BPF_SPIN_LOCK:
        case BPF_TIMER:
+       case BPF_WORKQUEUE:
        case BPF_KPTR_UNREF:
        case BPF_KPTR_REF:
        case BPF_KPTR_PERCPU:
include/uapi/linux/bpf.h
index cee0a7915c08a2b44c19de3f74de476361e9f741..e4ae83550fb39c39149d269805628ee4d96dca44 100644
@@ -7306,6 +7306,10 @@ struct bpf_timer {
        __u64 __opaque[2];
 } __attribute__((aligned(8)));
 
+struct bpf_wq {
+       __u64 __opaque[2];
+} __attribute__((aligned(8)));
+
 struct bpf_dynptr {
        __u64 __opaque[2];
 } __attribute__((aligned(8)));
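
The UAPI type is deliberately opaque and copies bpf_timer's layout: two __u64 words, 8-byte aligned. The btf_field_type_size() and btf_field_type_align() hunks above rely on exactly these properties; as illustrative compile-time checks:

_Static_assert(sizeof(struct bpf_wq) == 16, "bpf_wq is two opaque u64 words");
_Static_assert(_Alignof(struct bpf_wq) == 8, "bpf_wq is 8-byte aligned");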
kernel/bpf/btf.c
index 6d46cee47ae3e85c53303ca9bdd9d1e5663ad45b..8291fbfd27b1f77cc461bc864081c1c636501cf9 100644
@@ -3464,6 +3464,15 @@ static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,
                        goto end;
                }
        }
+       if (field_mask & BPF_WORKQUEUE) {
+               if (!strcmp(name, "bpf_wq")) {
+                       if (*seen_mask & BPF_WORKQUEUE)
+                               return -E2BIG;
+                       *seen_mask |= BPF_WORKQUEUE;
+                       type = BPF_WORKQUEUE;
+                       goto end;
+               }
+       }
        field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head");
        field_mask_test_name(BPF_LIST_NODE, "bpf_list_node");
        field_mask_test_name(BPF_RB_ROOT,   "bpf_rb_root");
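
The *seen_mask bookkeeping caps each value type at one bpf_wq, just as for bpf_timer: a second occurrence makes btf_get_field_type() return -E2BIG, which fails the whole field parse. A sketch of a value type that would be rejected:

struct bad_elem {
        struct bpf_wq a;
        struct bpf_wq b;        /* BPF_WORKQUEUE already set in *seen_mask: -E2BIG */
};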
@@ -3515,6 +3524,7 @@ static int btf_find_struct_field(const struct btf *btf,
                switch (field_type) {
                case BPF_SPIN_LOCK:
                case BPF_TIMER:
+               case BPF_WORKQUEUE:
                case BPF_LIST_NODE:
                case BPF_RB_NODE:
                case BPF_REFCOUNT:
@@ -3582,6 +3592,7 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
                switch (field_type) {
                case BPF_SPIN_LOCK:
                case BPF_TIMER:
+               case BPF_WORKQUEUE:
                case BPF_LIST_NODE:
                case BPF_RB_NODE:
                case BPF_REFCOUNT:
@@ -3816,6 +3827,7 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
 
        rec->spin_lock_off = -EINVAL;
        rec->timer_off = -EINVAL;
+       rec->wq_off = -EINVAL;
        rec->refcount_off = -EINVAL;
        for (i = 0; i < cnt; i++) {
                field_type_size = btf_field_type_size(info_arr[i].type);
@@ -3846,6 +3858,11 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
                        /* Cache offset for faster lookup at runtime */
                        rec->timer_off = rec->fields[i].offset;
                        break;
+               case BPF_WORKQUEUE:
+                       WARN_ON_ONCE(rec->wq_off >= 0);
+                       /* Cache offset for faster lookup at runtime */
+                       rec->wq_off = rec->fields[i].offset;
+                       break;
                case BPF_REFCOUNT:
                        WARN_ON_ONCE(rec->refcount_off >= 0);
                        /* Cache offset for faster lookup at runtime */
kernel/bpf/syscall.c
index 7d392ec836557bea5ecf0c5b2bcb5bfcd76f23ad..0848e4141b00abd4ab917691590d1a757f9a2a29 100644
@@ -559,6 +559,7 @@ void btf_record_free(struct btf_record *rec)
                case BPF_SPIN_LOCK:
                case BPF_TIMER:
                case BPF_REFCOUNT:
+               case BPF_WORKQUEUE:
                        /* Nothing to release */
                        break;
                default:
@@ -608,6 +609,7 @@ struct btf_record *btf_record_dup(const struct btf_record *rec)
                case BPF_SPIN_LOCK:
                case BPF_TIMER:
                case BPF_REFCOUNT:
+               case BPF_WORKQUEUE:
                        /* Nothing to acquire */
                        break;
                default:
@@ -679,6 +681,8 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
                case BPF_TIMER:
                        bpf_timer_cancel_and_free(field_ptr);
                        break;
+               case BPF_WORKQUEUE:
+                       break;
                case BPF_KPTR_UNREF:
                        WRITE_ONCE(*(u64 *)field_ptr, 0);
                        break;
@@ -1085,7 +1089,7 @@ static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
 
        map->record = btf_parse_fields(btf, value_type,
                                       BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
-                                      BPF_RB_ROOT | BPF_REFCOUNT,
+                                      BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE,
                                       map->value_size);
        if (!IS_ERR_OR_NULL(map->record)) {
                int i;
kernel/bpf/verifier.c
index 5a7e34e83a5b22f64c9b5fb0b44817f37f502b96..89490a95b120300474ea98381889369caeb2858d 100644
@@ -1838,6 +1838,8 @@ static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
                         */
                        if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER))
                                reg->map_uid = reg->id;
+                       if (btf_record_has_field(map->inner_map_meta->record, BPF_WORKQUEUE))
+                               reg->map_uid = reg->id;
                } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
                        reg->type = PTR_TO_XDP_SOCK;
                } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
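
This mirrors the bpf_timer rule directly above it: when an inner map's value embeds a bpf_wq, every lookup from the outer map must get a fresh map_uid, so the verifier does not treat workqueues owned by different inner maps as interchangeable. A hedged libbpf-style sketch of the map-in-map shape in question:

struct elem {
        struct bpf_wq work;
};

struct inner {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, struct elem);
} im SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
        __uint(max_entries, 1);
        __type(key, int);
        __array(values, struct inner);
} outer SEC(".maps") = {
        .values = { &im },
};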
@@ -18141,6 +18143,13 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
                }
        }
 
+       if (btf_record_has_field(map->record, BPF_WORKQUEUE)) {
+               if (is_tracing_prog_type(prog_type)) {
+                       verbose(env, "tracing progs cannot use bpf_wq yet\n");
+                       return -EINVAL;
+               }
+       }
+
        if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) &&
            !bpf_offload_prog_map_match(prog, map)) {
                verbose(env, "offload device mismatch between prog and map\n");
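
With this check in place, any tracing program type (kprobe, tracepoint, perf_event, raw tracepoint) that references a map whose value embeds a struct bpf_wq is refused at load time. Reusing the array map from the first sketch, with a hypothetical attach point:

SEC("kprobe/do_nanosleep")      /* hypothetical attach point */
int wq_user(void *ctx)
{
        int key = 0;

        /* any reference to "array" is enough for the verifier to bail out
         * with: tracing progs cannot use bpf_wq yet
         */
        return bpf_map_lookup_elem(&array, &key) ? 0 : 1;
}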