/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX"
 */
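
/* Example usage (illustrative only; see iproute2's f_route for the
 * authoritative syntax):
 *
 *   ip route add 10.0.0.0/8 via 192.168.0.1 realm 2
 *   tc filter add dev eth0 parent 1: protocol ip prio 100 \
 *           route to 2 classid 1:10
 *
 * Packets routed into realm 2 are then classified to class 1:10.
 * "from REALM" / "fromif DEV" match the source side; per assumption 2
 * above, the two are mutually exclusive.
 */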
struct route4_fastmap {
	struct route4_filter	*filter;
	u32			id;
	int			iif;
};
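
/* head->table below holds one bucket per destination realm (0..255)
 * plus a single wildcard bucket at index 256.  The fastmap above is a
 * small cache of recent (id, iif) -> filter results, indexed by
 * route4_fastmap_hash(), i.e. the low four bits of the route tag.
 */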

struct route4_head {
	struct route4_fastmap	fastmap[16];
	struct route4_bucket __rcu *table[256 + 1];
	struct rcu_head		rcu;
};

struct route4_bucket {
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter __rcu *ht[16 + 16 + 1];
	struct rcu_head		rcu;
};

struct route4_filter {
	struct route4_filter __rcu *next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
	struct tcf_proto	*tp;
	union {
		struct work_struct	work;
		struct rcu_head		rcu;
	};
};

#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))

static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}

static DEFINE_SPINLOCK(fastmap_lock);
static void
route4_reset_fastmap(struct route4_head *head)
{
	spin_lock_bh(&fastmap_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&fastmap_lock);
}

static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	/* fastmap updates must look atomic so id, iif and filter
	 * stay consistent with each other
	 */
	spin_lock_bh(&fastmap_lock);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
	spin_unlock_bh(&fastmap_lock);
}

static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
	return 32;
}
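
/* At classify time the key is skb_dst(skb)->tclassid, which packs the
 * routing realms as (FROM realm << 16) | TO realm.  route4_hash_to()
 * selects the bucket in head->table, while route4_hash_from(),
 * route4_hash_iif() and route4_hash_wild() pick the slot inside that
 * bucket.
 */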

#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_has_actions(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
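
/* ROUTE4_APPLY_RESULT() relies on the surrounding route4_classify()
 * context (head, id, iif, dont_cache, res and the filter loops below).
 * A negative verdict from tcf_exts_exec() sets dont_cache and moves on
 * to the next filter, so neither the match nor an eventual failure is
 * cached in the fastmap.
 */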

static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = rcu_dereference_bh(tp->root);
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	dst = skb_dst(skb);
	if (!dst)
		goto failure;

	id = dst->tclassid;

	iif = inet_iif(skb);

	h = route4_fastmap_hash(id, iif);

	spin_lock(&fastmap_lock);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE) {
			spin_unlock(&fastmap_lock);
			goto failure;
		}

		*res = f->res;
		spin_unlock(&fastmap_lock);
		return 0;
	}
	spin_unlock(&fastmap_lock);

	h = route4_hash_to(id);

restart:
	b = rcu_dereference_bh(head->table[h]);
	if (b) {
		for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
		     f;
		     f = rcu_dereference_bh(f->next))
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;
}

static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}
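
/* A filter handle encodes its match in 32 bits:
 *
 *   31            16 15             0
 *  +----------------+----------------+
 *  |   FROM / IIF   |       TO       |
 *  +----------------+----------------+
 *
 * Low half: the TO realm (0..255); bit 15 set means "to ANY" and maps
 * to the wildcard bucket 256 in to_hash().  High half: the FROM realm
 * (0..255), an interface index with 0x8000 or'ed in, or 0xFFFF for
 * "from ANY".  from_hash() maps that half to one of a bucket's 33
 * slots.  route4_set_parms() below builds handles the same way.
 */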

static void *route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	h1 = to_hash(handle);
	if (h1 > 256)
		return NULL;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return NULL;

	b = rtnl_dereference(head->table[h1]);
	if (b) {
		for (f = rtnl_dereference(b->ht[h2]);
		     f;
		     f = rtnl_dereference(f->next))
			if (f->handle == handle)
				return f;
	}
	return NULL;
}

static int route4_init(struct tcf_proto *tp)
{
	struct route4_head *head;

	head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	rcu_assign_pointer(tp->root, head);
	return 0;
}

static void __route4_delete_filter(struct route4_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void route4_delete_filter_work(struct work_struct *work)
{
	struct route4_filter *f = container_of(work, struct route4_filter, work);

	rtnl_lock();
	__route4_delete_filter(f);
	rtnl_unlock();
}

static void route4_delete_filter(struct rcu_head *head)
{
	struct route4_filter *f = container_of(head, struct route4_filter, rcu);

	INIT_WORK(&f->work, route4_delete_filter_work);
	tcf_queue_work(&f->work);
}
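
/* Filter teardown happens in two stages: route4_delete_filter() runs
 * as the RCU callback once all readers are done, but the actual
 * freeing needs RTNL (tcf_exts_destroy() tears down actions), so the
 * callback only queues route4_delete_filter_work(), which takes
 * rtnl_lock() and performs the cleanup.
 */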

static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		b = rtnl_dereference(head->table[h1]);
		if (b) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
					struct route4_filter *next;

					next = rtnl_dereference(f->next);
					RCU_INIT_POINTER(b->ht[h2], next);
					tcf_unbind_filter(tp, &f->res);
					if (tcf_exts_get_net(&f->exts))
						call_rcu(&f->rcu, route4_delete_filter);
					else
						__route4_delete_filter(f);
				}
			}
			RCU_INIT_POINTER(head->table[h1], NULL);
			kfree_rcu(b, rcu);
		}
	}
	kfree_rcu(head, rcu);
}

static int route4_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter *f = arg;
	struct route4_filter __rcu **fp;
	struct route4_filter *nf;
	struct route4_bucket *b;
	unsigned int h = 0;
	int i, h1;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	fp = &b->ht[from_hash(h >> 16)];
	for (nf = rtnl_dereference(*fp); nf;
	     fp = &nf->next, nf = rtnl_dereference(*fp)) {
		if (nf == f) {
			/* unlink it */
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));

			/* Remove any fastmap entries that might reference
			 * this filter; since it is now unlinked, it cannot
			 * get back into the fastmap.
			 */
			route4_reset_fastmap(head);

			/* Delete it */
			tcf_unbind_filter(tp, &f->res);
			tcf_exts_get_net(&f->exts);
			call_rcu(&f->rcu, route4_delete_filter);

			/* Check whether the bucket still holds filters */
			for (i = 0; i <= 32; i++) {
				struct route4_filter *rt;

				rt = rtnl_dereference(b->ht[i]);
				if (rt)
					goto out;
			}

			/* OK, the bucket is empty: drop it */
			RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
			kfree_rcu(b, rcu);
			break;
		}
	}

out:
	*last = true;
	for (h1 = 0; h1 <= 256; h1++) {
		if (rcu_access_pointer(head->table[h1])) {
			*last = false;
			break;
		}
	}

	return 0;
}
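
/* route4_delete() reports through *last whether any filter remains in
 * head->table, letting the caller destroy the whole tcf_proto once the
 * final filter is gone.
 */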

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};

static int route4_set_parms(struct net *net, struct tcf_proto *tp,
			    unsigned long base, struct route4_filter *f,
			    u32 handle, struct route4_head *head,
			    struct nlattr **tb, struct nlattr *est, int new,
			    bool ovr)
{
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			return -EINVAL;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			return -EINVAL;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			return -EINVAL;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			return -EINVAL;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			return -EINVAL;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			return -EINVAL;
	}

	h1 = to_hash(nhandle);
	b = rtnl_dereference(head->table[h1]);
	if (!b) {
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			return -ENOBUFS;

		rcu_assign_pointer(head->table[h1], b);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		for (fp = rtnl_dereference(b->ht[h2]);
		     fp;
		     fp = rtnl_dereference(fp->next))
			if (fp->handle == f->handle)
				return -EEXIST;
	}

	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id << 16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	f->tp = tp;

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	return 0;
}
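
/* route4_set_parms() above derives the handle (nhandle) from the
 * TO/FROM/IIF attributes exactly as described before from_hash(); for
 * a new filter with an explicit handle it additionally folds in bits
 * 8..14 of the user handle and rejects any handle that disagrees with
 * the derived value.
 */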

static int route4_change(struct net *net, struct sk_buff *in_skb,
			 struct tcf_proto *tp, unsigned long base, u32 handle,
			 struct nlattr **tca, void **arg, bool ovr)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter __rcu **fp;
	struct route4_filter *fold, *f1, *pfp, *f = NULL;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	int err;
	bool new = true;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy, NULL);
	if (err < 0)
		return err;

	fold = *arg;
	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (!f)
		goto errout;

	err = tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	if (err < 0)
		goto errout;

	if (fold) {
		f->id = fold->id;
		f->iif = fold->iif;
		f->res = fold->res;
		f->handle = fold->handle;

		f->tp = fold->tp;
		f->bkt = fold->bkt;
		new = false;
	}

	err = route4_set_parms(net, tp, base, f, handle, head, tb,
			       tca[TCA_RATE], new, ovr);
	if (err < 0)
		goto errout;

	h = from_hash(f->handle >> 16);
	fp = &f->bkt->ht[h];
	for (pfp = rtnl_dereference(*fp);
	     (f1 = rtnl_dereference(*fp)) != NULL;
	     fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	netif_keep_dst(qdisc_dev(tp->q));
	rcu_assign_pointer(f->next, f1);
	rcu_assign_pointer(*fp, f);

	if (fold && fold->handle && f->handle != fold->handle) {
		th = to_hash(fold->handle);
		h = from_hash(fold->handle >> 16);
		b = rtnl_dereference(head->table[th]);
		if (b) {
			fp = &b->ht[h];
			for (pfp = rtnl_dereference(*fp); pfp;
			     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
				if (pfp == fold) {
					/* unlink the old filter fold, not the
					 * newly inserted f, which lives in a
					 * different chain when handles differ
					 */
					rcu_assign_pointer(*fp, fold->next);
					break;
				}
			}
		}
	}

	route4_reset_fastmap(head);
	*arg = f;
	if (fold) {
		tcf_unbind_filter(tp, &fold->res);
		tcf_exts_get_net(&fold->exts);
		call_rcu(&fold->rcu, route4_delete_filter);
	}
	return 0;

errout:
	if (f)
		tcf_exts_destroy(&f->exts);
	kfree(f);
	return err;
}
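
/* On replace, route4_change() above makes the new filter visible
 * before retiring the old one: the new entry is linked in handle order
 * first, the old entry is then unlinked from its (possibly different)
 * bucket chain, the fastmap is reset, and the old filter is freed only
 * after an RCU grace period.
 */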

static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = rtnl_dereference(head->table[h]);

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = rtnl_dereference(b->ht[h1]);
				     f;
				     f = rtnl_dereference(f->next)) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

static int route4_dump(struct net *net, struct tcf_proto *tp, void *fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = fh;
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
			goto nla_put_failure;
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF &&
		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
			goto nla_put_failure;
	} else {
		id = f->id >> 16;
		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
			goto nla_put_failure;
	}
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void route4_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct route4_filter *f = fh;

	if (f && f->res.classid == classid)
		f->res.class = cl;
}

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		= "route",
	.classify	= route4_classify,
	.init		= route4_init,
	.destroy	= route4_destroy,
	.get		= route4_get,
	.change		= route4_change,
	.delete		= route4_delete,
	.walk		= route4_walk,
	.dump		= route4_dump,
	.bind_class	= route4_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");