/*
 * kernel/bpf/offload.c — binding and lifetime management of eBPF programs
 * offloaded to network devices (via the ndo_bpf netdev op).
 */
1 #include <linux/bpf.h>
2 #include <linux/bpf_verifier.h>
3 #include <linux/bug.h>
4 #include <linux/list.h>
5 #include <linux/netdevice.h>
6 #include <linux/printk.h>
7 #include <linux/rtnetlink.h>
8
9 /* protected by RTNL */
10 static LIST_HEAD(bpf_prog_offload_devs);
11
12 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
13 {
14 struct net *net = current->nsproxy->net_ns;
15 struct bpf_dev_offload *offload;
16
17 if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
18 attr->prog_type != BPF_PROG_TYPE_XDP)
19 return -EINVAL;
20
21 if (attr->prog_flags)
22 return -EINVAL;
23
24 offload = kzalloc(sizeof(*offload), GFP_USER);
25 if (!offload)
26 return -ENOMEM;
27
28 offload->prog = prog;
29 init_waitqueue_head(&offload->verifier_done);
30
31 rtnl_lock();
32 offload->netdev = __dev_get_by_index(net, attr->prog_ifindex);
33 if (!offload->netdev) {
34 rtnl_unlock();
35 kfree(offload);
36 return -EINVAL;
37 }
38
39 prog->aux->offload = offload;
40 list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
41 rtnl_unlock();
42
43 return 0;
44 }
45
46 static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
47 struct netdev_bpf *data)
48 {
49 struct net_device *netdev = prog->aux->offload->netdev;
50
51 ASSERT_RTNL();
52
53 if (!netdev)
54 return -ENODEV;
55 if (!netdev->netdev_ops->ndo_bpf)
56 return -EOPNOTSUPP;
57
58 data->command = cmd;
59
60 return netdev->netdev_ops->ndo_bpf(netdev, data);
61 }
62
63 int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
64 {
65 struct netdev_bpf data = {};
66 int err;
67
68 data.verifier.prog = env->prog;
69
70 rtnl_lock();
71 err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
72 if (err)
73 goto exit_unlock;
74
75 env->dev_ops = data.verifier.ops;
76
77 env->prog->aux->offload->dev_state = true;
78 env->prog->aux->offload->verifier_running = true;
79 exit_unlock:
80 rtnl_unlock();
81 return err;
82 }
83
/* Tear down the device side of @prog's offload.
 *
 * Caller must hold RTNL.  Waits for any in-flight verification to
 * finish, tells the driver to destroy its state (if any was created),
 * unlinks the offload from the global list and severs the netdev link.
 * The bpf_dev_offload struct itself is freed by the caller
 * (bpf_prog_offload_destroy()), not here.
 */
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_dev_offload *offload = prog->aux->offload;
	struct netdev_bpf data = {};

	/* Caution - if netdev is destroyed before the program, this function
	 * will be called twice.
	 */

	data.offload.prog = prog;

	if (offload->verifier_running)
		wait_event(offload->verifier_done, !offload->verifier_running);

	if (offload->dev_state)
		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));

	/* Clearing dev_state and using list_del_init() keep the second
	 * invocation (netdev-gone case) a safe no-op.
	 */
	offload->dev_state = false;
	list_del_init(&offload->offloads);
	offload->netdev = NULL;
}
105
/* Final teardown of @prog's offload state; frees the tracking struct. */
void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_dev_offload *offload = prog->aux->offload;

	/* Release any waiter before taking RTNL — NOTE(review): done
	 * outside the lock, presumably so the woken verifier path is
	 * not serialized against us; confirm before reordering.
	 */
	offload->verifier_running = false;
	wake_up(&offload->verifier_done);

	rtnl_lock();
	__bpf_prog_offload_destroy(prog);
	rtnl_unlock();

	kfree(offload);
}
119
120 static int bpf_prog_offload_translate(struct bpf_prog *prog)
121 {
122 struct bpf_dev_offload *offload = prog->aux->offload;
123 struct netdev_bpf data = {};
124 int ret;
125
126 data.offload.prog = prog;
127
128 offload->verifier_running = false;
129 wake_up(&offload->verifier_done);
130
131 rtnl_lock();
132 ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
133 rtnl_unlock();
134
135 return ret;
136 }
137
/* Stub installed as bpf_func for offloaded programs: an offloaded
 * program lives on the device, so host execution is a bug — warn
 * loudly and return 0 (drop/no-op verdict).
 */
static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}
144
/* "JIT" step for offloaded programs: point the host entry at the
 * warn-only stub (the real code runs on the device) and ask the
 * driver to translate the program.
 */
int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}
151
/* Offloaded programs have no host-side prog ops; the table is
 * intentionally empty.
 */
const struct bpf_prog_ops bpf_offload_prog_ops = {
};
154
155 static int bpf_offload_notification(struct notifier_block *notifier,
156 ulong event, void *ptr)
157 {
158 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
159 struct bpf_dev_offload *offload, *tmp;
160
161 ASSERT_RTNL();
162
163 switch (event) {
164 case NETDEV_UNREGISTER:
165 /* ignore namespace changes */
166 if (netdev->reg_state != NETREG_UNREGISTERING)
167 break;
168
169 list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
170 offloads) {
171 if (offload->netdev == netdev)
172 __bpf_prog_offload_destroy(offload->prog);
173 }
174 break;
175 default:
176 break;
177 }
178 return NOTIFY_OK;
179 }
180
/* Hooked into the netdev notifier chain at subsys init. */
static struct notifier_block bpf_offload_notifier = {
	.notifier_call = bpf_offload_notification,
};
184
185 static int __init bpf_offload_init(void)
186 {
187 register_netdevice_notifier(&bpf_offload_notifier);
188 return 0;
189 }
190
191 subsys_initcall(bpf_offload_init);