git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blobdiff - kernel/bpf/syscall.c
bpf: allow for correlation of maps and helpers in dump
[mirror_ubuntu-eoan-kernel.git] / kernel / bpf / syscall.c
index 2c4cfeaa8d5e785f16758be08cb8a462766363d9..007802c5ca7d7f07f257233bfd0aa88fb9068908 100644 (file)
@@ -937,10 +937,16 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
        if (atomic_dec_and_test(&prog->aux->refcnt)) {
+               int i;
+
                trace_bpf_prog_put_rcu(prog);
                /* bpf_prog_free_id() must be called first */
                bpf_prog_free_id(prog, do_idr_lock);
+
+               /* Drop the kallsyms entries of all JITed subprograms
+                * before the main program's own entry goes away.
+                */
+               for (i = 0; i < prog->aux->func_cnt; i++)
+                       bpf_prog_kallsyms_del(prog->aux->func[i]);
                bpf_prog_kallsyms_del(prog);
+
                call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
        }
 }
@@ -1194,7 +1200,8 @@ static int bpf_prog_load(union bpf_attr *attr)
                goto free_used_maps;
 
        /* eBPF program is ready to be JITed */
-       prog = bpf_prog_select_runtime(prog, &err);
+       if (!prog->bpf_func)
+               prog = bpf_prog_select_runtime(prog, &err);
        if (err < 0)
                goto free_used_maps;
 
@@ -1551,6 +1558,67 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
        return fd;
 }
 
+/* Resolve a kernel address embedded as a BPF_LD_IMM64 immediate back to
+ * the bpf_map it points at, by linear search over the maps recorded in
+ * prog->aux->used_maps. Returns NULL if @addr matches none of them.
+ */
+static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
+                                             unsigned long addr)
+{
+       int i;
+
+       for (i = 0; i < prog->aux->used_map_cnt; i++)
+               if (prog->aux->used_maps[i] == (void *)addr)
+                       return prog->aux->used_maps[i];
+       return NULL;
+}
+
+/* Produce a sanitized copy of @prog's xlated instructions for dumping to
+ * user space: rewrite verifier-internal opcodes back to their user-visible
+ * form, and replace embedded kernel addresses with stable map IDs or zeroes
+ * unless bpf_dump_raw_ok() allows exposing raw addresses.
+ *
+ * Returns a kmemdup'ed buffer the caller must kfree(), or NULL on OOM.
+ */
+static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
+{
+       const struct bpf_map *map;
+       struct bpf_insn *insns;
+       u64 imm;
+       int i;
+
+       insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
+                       GFP_USER);
+       if (!insns)
+               return insns;
+
+       for (i = 0; i < prog->len; i++) {
+               /* Internal tail-call opcode: present it as the original
+                * helper call to BPF_FUNC_tail_call.
+                */
+               if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
+                       insns[i].code = BPF_JMP | BPF_CALL;
+                       insns[i].imm = BPF_FUNC_tail_call;
+                       /* fall-through */
+               }
+               if (insns[i].code == (BPF_JMP | BPF_CALL) ||
+                   insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
+                       if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
+                               insns[i].code = BPF_JMP | BPF_CALL;
+                       /* without raw access, hide the helper address */
+                       if (!bpf_dump_raw_ok())
+                               insns[i].imm = 0;
+                       continue;
+               }
+
+               if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
+                       continue;
+
+               /* BPF_LD_IMM64 spans two insns; reassemble the 64-bit imm */
+               imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
+               map = bpf_map_from_imm(prog, imm);
+               if (map) {
+                       /* expose the map by its user-visible ID, not by
+                        * its kernel pointer
+                        */
+                       insns[i].src_reg = BPF_PSEUDO_MAP_FD;
+                       insns[i].imm = map->id;
+                       insns[i + 1].imm = 0;
+                       continue;
+               }
+
+               /* zero out a leaked prog->aux address when raw dumping
+                * is not permitted
+                */
+               if (!bpf_dump_raw_ok() &&
+                   imm == (unsigned long)prog->aux) {
+                       insns[i].imm = 0;
+                       insns[i + 1].imm = 0;
+                       continue;
+               }
+       }
+
+       return insns;
+}
+
 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
                                   const union bpf_attr *attr,
                                   union bpf_attr __user *uattr)
@@ -1601,18 +1669,34 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        ulen = info.jited_prog_len;
        info.jited_prog_len = prog->jited_len;
        if (info.jited_prog_len && ulen) {
-               uinsns = u64_to_user_ptr(info.jited_prog_insns);
-               ulen = min_t(u32, info.jited_prog_len, ulen);
-               if (copy_to_user(uinsns, prog->bpf_func, ulen))
-                       return -EFAULT;
+               if (bpf_dump_raw_ok()) {
+                       uinsns = u64_to_user_ptr(info.jited_prog_insns);
+                       ulen = min_t(u32, info.jited_prog_len, ulen);
+                       if (copy_to_user(uinsns, prog->bpf_func, ulen))
+                               return -EFAULT;
+               } else {
+                       info.jited_prog_insns = 0;
+               }
        }
 
        ulen = info.xlated_prog_len;
        info.xlated_prog_len = bpf_prog_insn_size(prog);
        if (info.xlated_prog_len && ulen) {
+               struct bpf_insn *insns_sanitized;
+               bool fault;
+
+               if (prog->blinded && !bpf_dump_raw_ok()) {
+                       info.xlated_prog_insns = 0;
+                       goto done;
+               }
+               insns_sanitized = bpf_insn_prepare_dump(prog);
+               if (!insns_sanitized)
+                       return -ENOMEM;
                uinsns = u64_to_user_ptr(info.xlated_prog_insns);
                ulen = min_t(u32, info.xlated_prog_len, ulen);
-               if (copy_to_user(uinsns, prog->insnsi, ulen))
+               fault = copy_to_user(uinsns, insns_sanitized, ulen);
+               kfree(insns_sanitized);
+               if (fault)
                        return -EFAULT;
        }