kernel/bpf/offload.c (at commit "bpf: call verifier_prep from its callback in struct bpf_offload_dev")

/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

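/* Driver-side offload handle: one per offload-capable device, holding the
 * driver's verifier callbacks and the list of netdevs registered against it.
 * A single device may back several netdevs (e.g. the ports of one ASIC).
 */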
struct bpf_offload_dev {
	const struct bpf_prog_offload_ops *ops;
	struct list_head netdevs;
};

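/* Per-netdev offload state, kept in the offdevs rhashtable and keyed by the
 * netdev pointer; tracks every program and map offloaded to this device.
 */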
struct bpf_offload_netdev {
	struct rhash_head l;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	struct list_head progs;
	struct list_head maps;
	struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
	.nelem_hint = 4,
	.key_len = sizeof(struct net_device *),
	.key_offset = offsetof(struct bpf_offload_netdev, netdev),
	.head_offset = offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking = true,
};

static struct rhashtable offdevs;
static bool offdevs_inited;

static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

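/* Map a netdev to its bpf_offload_netdev entry, if any. Callers must hold
 * bpf_devs_lock; the table may not exist yet if no device ever registered.
 */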
static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	if (!offdevs_inited)
		return NULL;
	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

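/* Called at BPF_PROG_LOAD time for device-bound programs: resolve
 * attr->prog_ifindex in the caller's netns, make sure the device supports
 * offload, and link the program into the per-netdev list.
 */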
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;

	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
					   attr->prog_ifindex);
	err = bpf_dev_offload_check(offload->netdev);
	if (err)
		goto err_maybe_put;

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(offload->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}
	offload->offdev = ondev->offdev;
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);
	dev_put(offload->netdev);
	up_write(&bpf_devs_lock);

	return 0;
err_unlock:
	up_write(&bpf_devs_lock);
err_maybe_put:
	if (offload->netdev)
		dev_put(offload->netdev);
	kfree(offload);
	return err;
}

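/* Forward a command for @prog to its netdev's ndo_bpf() op. RTNL must be
 * held, and the program must still be bound to a device.
 */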
static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
			     struct netdev_bpf *data)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	struct net_device *netdev;

	ASSERT_RTNL();

	if (!offload)
		return -ENODEV;
	netdev = offload->netdev;

	data->command = cmd;

	return netdev->netdev_ops->ndo_bpf(netdev, data);
}

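/* The verifier hooks below run under the read side of bpf_devs_lock, so the
 * offload cannot be torn down while a driver callback is in flight.
 */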
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		ret = offload->offdev->ops->prepare(offload->netdev, env);
		offload->dev_state = !ret;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->insn_hook(env, insn_idx,
						      prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (offload->offdev->ops->finalize)
			ret = offload->offdev->ops->finalize(env);
		else
			ret = 0;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

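/* Tear down a program's offload state; callers hold RTNL and bpf_devs_lock
 * taken for writing.
 */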
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	struct netdev_bpf data = {};

	data.offload.prog = prog;

	if (offload->dev_state)
		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));

	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
	bpf_prog_free_id(prog, true);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (prog->aux->offload)
		__bpf_prog_offload_destroy(prog);
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct netdev_bpf data = {};
	int ret;

	data.offload.prog = prog;

	rtnl_lock();
	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
	rtnl_unlock();

	return ret;
}

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

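/* A device-bound program must never run on the host, so point bpf_func at a
 * stub that only warns before handing the program to the device to translate.
 */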
int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

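/* Argument block and callback for ns_get_path_cb(), used by
 * bpf_prog_offload_info_fill() to report which network namespace the bound
 * netdev lives in.
 */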
struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog = prog,
		.info = info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	void *res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (IS_ERR(res)) {
		if (!info->ifindex)
			return -ENODEV;
		return PTR_ERR(res);
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

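/* Map counterpart of __bpf_offload_ndo(): forward a command for @offmap to
 * the device's ndo_bpf() op under RTNL.
 */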
static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offload_netdev *ondev;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = kzalloc(sizeof(*offmap), GFP_USER);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	kfree(offmap);
	return ERR_PTR(err);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map, true);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	kfree(offmap);
}

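/* The element accessors below all proxy to the device's map ops under
 * bpf_devs_lock, failing with -ENODEV once the map has been unbound from
 * its netdev.
 */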
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}

struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap = map_to_offmap(map),
		.info = info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	void *res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (IS_ERR(res)) {
		if (!info->ifindex)
			return -ENODEV;
		return PTR_ERR(res);
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

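/* A program and a netdev match if the program is bound to that netdev, or
 * to another netdev belonging to the same offload device.
 */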
static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_dev_bound(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}

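/* Drivers call this to attach @netdev to @offdev's offload state; the entry
 * is hashed by netdev pointer and linked into the device's netdev list.
 */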
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	down_write(&bpf_devs_lock);
	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_unlock_free;
	}

	list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	up_write(&bpf_devs_lock);
	return 0;

err_unlock_free:
	up_write(&bpf_devs_lock);
	kfree(ondev);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		goto unlock;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
	list_del(&ondev->offdev_netdevs);

	/* Try to move the objects to another netdev of the device */
	altdev = list_first_entry_or_null(&offdev->netdevs,
					  struct bpf_offload_netdev,
					  offdev_netdevs);
	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
unlock:
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

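/* Allocate a device handle for a driver and record its verifier callbacks.
 * The first caller also initializes the global offdevs rhashtable, which is
 * created lazily.
 */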
struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops)
{
	struct bpf_offload_dev *offdev;
	int err;

	down_write(&bpf_devs_lock);
	if (!offdevs_inited) {
		err = rhashtable_init(&offdevs, &offdevs_params);
		if (err) {
			up_write(&bpf_devs_lock);
			return ERR_PTR(err);
		}
		offdevs_inited = true;
	}
	up_write(&bpf_devs_lock);

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);