bpf: Implement an interface to register bpf_iter targets
author    Yonghong Song <yhs@fb.com>
          Sat, 9 May 2020 17:58:59 +0000 (10:58 -0700)
committer Alexei Starovoitov <ast@kernel.org>
          Sun, 10 May 2020 00:05:25 +0000 (17:05 -0700)
The target can call bpf_iter_reg_target() to register itself.
The needed information:
  target:            target name
  seq_ops:           the seq_file operations for the target
  init_seq_private:  target callback to initialize seq_priv during file open
  fini_seq_private:  target callback to clean up seq_priv during file release
  seq_priv_size:     the private_data size needed by the seq_file operations
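
As an illustration only (the "foo" target, foo_seq_ops, the callbacks and
struct foo_iter_priv are made-up names, not part of this patch), a target
could describe itself with the struct bpf_iter_reg added below and register
during its init path:

  /* hypothetical target "foo"; foo_seq_ops, foo_init_seq_private,
   * foo_fini_seq_private and struct foo_iter_priv are assumed to be
   * defined by the target elsewhere
   */
  static struct bpf_iter_reg foo_reg_info = {
          .target           = "foo",
          .seq_ops          = &foo_seq_ops,
          .init_seq_private = foo_init_seq_private,
          .fini_seq_private = foo_fini_seq_private,
          .seq_priv_size    = sizeof(struct foo_iter_priv),
  };

  static int __init foo_iter_init(void)
  {
          return bpf_iter_reg_target(&foo_reg_info);
  }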

The target name identifies a target that provides a seq_ops for
iterating over its objects.

The target can provide two callback functions, init_seq_private and
fini_seq_private, which are called at file open and file release time
respectively. For example, for /proc/net/{tcp6, ipv6_route, netlink, ...},
the net namespace needs to be set up properly at file open and released
properly at file release.
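
A minimal sketch of such callbacks for the hypothetical "foo" target
(assuming <net/net_namespace.h> and <linux/nsproxy.h>, and ignoring the
!CONFIG_NET_NS case for brevity):

  /* pin the opener's netns for the lifetime of the seq_file and
   * drop the reference again on release
   */
  struct foo_iter_priv {
          struct net *net;
          /* per-iteration cursor state would follow here */
  };

  static int foo_init_seq_private(void *priv_data)
  {
          struct foo_iter_priv *priv = priv_data;

          priv->net = get_net(current->nsproxy->net_ns);
          return 0;
  }

  static void foo_fini_seq_private(void *priv_data)
  {
          struct foo_iter_priv *priv = priv_data;

          put_net(priv->net);
  }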

Function bpf_iter_unreg_target() is also implemented to unregister
a particular target.
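
Continuing the hypothetical "foo" target from above, teardown simply
drops the registration again:

  static void __exit foo_iter_exit(void)
  {
          bpf_iter_unreg_target("foo");
  }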

Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Link: https://lore.kernel.org/bpf/20200509175859.2474669-1-yhs@fb.com
include/linux/bpf.h
kernel/bpf/Makefile
kernel/bpf/bpf_iter.c [new file with mode: 0644]

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 1262ec460ab3565cdeed8b7162cd8d97177a97b4..40c78b86fe387313138c47ef297145e2eafdcc24 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -31,6 +31,7 @@ struct seq_file;
 struct btf;
 struct btf_type;
 struct exception_table_entry;
+struct seq_operations;
 
 extern struct idr btf_idr;
 extern spinlock_t btf_idr_lock;
@@ -1126,6 +1127,20 @@ struct bpf_link *bpf_link_get_from_fd(u32 ufd);
 int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
 int bpf_obj_get_user(const char __user *pathname, int flags);
 
+typedef int (*bpf_iter_init_seq_priv_t)(void *private_data);
+typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
+
+struct bpf_iter_reg {
+       const char *target;
+       const struct seq_operations *seq_ops;
+       bpf_iter_init_seq_priv_t init_seq_private;
+       bpf_iter_fini_seq_priv_t fini_seq_private;
+       u32 seq_priv_size;
+};
+
+int bpf_iter_reg_target(struct bpf_iter_reg *reg_info);
+void bpf_iter_unreg_target(const char *target);
+
 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index f2d7be596966eb3e889ba2cb6834d8e091fabe99..6a8b0febd3f6023f68062bdfa882957b88c123c6 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -2,7 +2,7 @@
 obj-y := core.o
 CFLAGS_core.o += $(call cc-disable-warning, override-init)
 
-obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o
+obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o
 obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
 obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o
 obj-$(CONFIG_BPF_SYSCALL) += disasm.o
diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c
new file mode 100644
index 0000000..5a8119d
--- /dev/null
+++ b/kernel/bpf/bpf_iter.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 Facebook */
+
+#include <linux/fs.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
+
+struct bpf_iter_target_info {
+       struct list_head list;
+       const char *target;
+       const struct seq_operations *seq_ops;
+       bpf_iter_init_seq_priv_t init_seq_private;
+       bpf_iter_fini_seq_priv_t fini_seq_private;
+       u32 seq_priv_size;
+};
+
+static struct list_head targets = LIST_HEAD_INIT(targets);
+static DEFINE_MUTEX(targets_mutex);
+
+int bpf_iter_reg_target(struct bpf_iter_reg *reg_info)
+{
+       struct bpf_iter_target_info *tinfo;
+
+       tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
+       if (!tinfo)
+               return -ENOMEM;
+
+       tinfo->target = reg_info->target;
+       tinfo->seq_ops = reg_info->seq_ops;
+       tinfo->init_seq_private = reg_info->init_seq_private;
+       tinfo->fini_seq_private = reg_info->fini_seq_private;
+       tinfo->seq_priv_size = reg_info->seq_priv_size;
+       INIT_LIST_HEAD(&tinfo->list);
+
+       mutex_lock(&targets_mutex);
+       list_add(&tinfo->list, &targets);
+       mutex_unlock(&targets_mutex);
+
+       return 0;
+}
+
+void bpf_iter_unreg_target(const char *target)
+{
+       struct bpf_iter_target_info *tinfo;
+       bool found = false;
+
+       mutex_lock(&targets_mutex);
+       list_for_each_entry(tinfo, &targets, list) {
+               if (!strcmp(target, tinfo->target)) {
+                       list_del(&tinfo->list);
+                       kfree(tinfo);
+                       found = true;
+                       break;
+               }
+       }
+       mutex_unlock(&targets_mutex);
+
+       WARN_ON(found == false);
+}