/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

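/* Sanitize the verdict of an exts-integrated (direct-action) program:
 * any return code outside the known TC_ACT_* opcodes is mapped to
 * TC_ACT_UNSPEC, i.e. "no match, continue with the next filter".
 */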
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

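/* Fast path: run each attached program over the skb under the RCU read
 * lock. At ingress the MAC header is pushed back first so the program
 * sees the same packet layout as at egress. skip_sw filters are never
 * executed in software; they simply report "no match" here unless they
 * run in direct-action mode.
 */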
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

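/* Only classic BPF keeps a copy of its sock_filter ops around for
 * dumping, so a NULL bpf_ops identifies an eBPF program.
 */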
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

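/* Issue a TC_CLSBPF_OFFLOAD callback to the drivers bound to this
 * block. If installing the new program fails, the call is replayed
 * with prog/oldprog swapped to roll the hardware back to its previous
 * state; for skip_sw filters, failing to land in hardware is a hard
 * error.
 */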
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;
	cls_bpf.gen_flags = obj->gen_flags;

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
	if (prog) {
		if (err < 0) {
			cls_bpf_offload_cmd(tp, oldprog, prog);
			return err;
		} else if (err > 0) {
			prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
		}
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

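/* Decide whether an add/replace needs to reach the hardware at all.
 * A replacement may not change the filter's skip_sw/skip_hw flags,
 * and skip_hw filters are passed down as NULL so drivers only ever
 * see the offloadable side of the transaction.
 */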
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;
	cls_bpf.gen_flags = prog->gen_flags;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, false);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

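/* Final teardown of a filter; runs with the RTNL lock held on both
 * call paths, since tcf_exts_destroy() may tear down actions.
 */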
static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);

	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

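/* RCU callback: the filter is unreachable now, but the final free
 * needs the RTNL lock, which cannot be taken from softirq context.
 * Bounce the teardown to the tc filter workqueue instead.
 */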
static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
	tcf_queue_work(&prog->work);
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove_ext(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

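/* Build a classic BPF filter from TCA_BPF_OPS/TCA_BPF_OPS_LEN: copy
 * the insns out of the netlink attribute, validate the length, and
 * let bpf_prog_create() check and possibly JIT the program. The raw
 * ops are kept around for dumping the filter back to user space.
 */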
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

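/* Take a reference on an already loaded eBPF program via the fd in
 * TCA_BPF_FD; device-bound (offloaded) programs are only acceptable
 * when the filter itself is skip_sw. Also keep the dst around if the
 * program needs it at egress.
 */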
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

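/* Parse and apply the netlink attributes for a new or replaced
 * filter: exactly one of classic (TCA_BPF_OPS) or extended
 * (TCA_BPF_FD) must be given, the flag attributes are validated,
 * and an optional classid is bound.
 */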
static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}

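/* Create or replace a filter. A new handle is allocated from the IDR
 * (or the requested one reserved) before the parms are set, so the
 * error paths must release it again; on replace, the old program is
 * unlinked and freed after an RCU grace period.
 */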
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	unsigned long idr_index;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
			       NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index,
				    1, 0x7FFFFFFF, GFP_KERNEL);
		if (ret)
			goto errout;
		prog->handle = idr_index;
	} else {
		if (!oldprog) {
			ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index,
					    handle, handle + 1, GFP_KERNEL);
			if (ret)
				goto errout;
		}
		prog->handle = handle;
	}

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace_ext(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove_ext(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

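/* Dump helpers: classic BPF exposes its raw ops, eBPF its name, ID
 * and tag, so user space can identify the loaded program.
 */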
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

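/* Dump one filter to user space, refreshing hardware stats first so
 * the exts counters include offloaded traffic.
 */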
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid)
		prog->res.class = cl;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		= "bpf",
	.owner		= THIS_MODULE,
	.classify	= cls_bpf_classify,
	.init		= cls_bpf_init,
	.destroy	= cls_bpf_destroy,
	.get		= cls_bpf_get,
	.change		= cls_bpf_change,
	.delete		= cls_bpf_delete,
	.walk		= cls_bpf_walk,
	.dump		= cls_bpf_dump,
	.bind_class	= cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);