/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
   1. For now we assume that route tags < 256.
      This allows direct table lookups instead of hash tables.
   2. For now we assume that "from TAG" and "fromdev DEV" statements
      are mutually exclusive.
   3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */

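/*
 * Illustrative use (a sketch, not taken from this file): routes are tagged
 * with realms and the classifier maps the realm pair found in
 * dst->tclassid to a class, e.g. roughly
 *
 *	ip route add 10.0.0.0/8 via 192.168.1.1 realm 2
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *		route to 2 classid 1:20
 *
 * Exact tc syntax depends on the iproute2 version.
 */
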
struct route4_fastmap
{
	struct route4_filter	*filter;
	u32			id;
	int			iif;
};

struct route4_head
{
	struct route4_fastmap	fastmap[16];
	struct route4_bucket	*table[256+1];
};

struct route4_bucket
{
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter	*ht[16+16+1];
};

struct route4_filter
{
	struct route4_filter	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
};

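/*
 * ROUTE4_FAILURE is stored in the fastmap as a negative cache entry: a
 * previous lookup for this (id, iif) pair matched no filter, so
 * route4_classify() can fail fast without walking the hash tables again.
 */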
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))

static struct tcf_ext_map route_ext_map = {
	.police = TCA_ROUTE4_POLICE,
	.action = TCA_ROUTE4_ACT
};

static __inline__ int route4_fastmap_hash(u32 id, int iif)
{
	return id&0xF;
}

static inline
void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
{
	qdisc_lock_tree(dev);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	qdisc_unlock_tree(dev);
}

static inline void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
}

static __inline__ int route4_hash_to(u32 id)
{
	return id&0xFF;
}

static __inline__ int route4_hash_from(u32 id)
{
	return (id>>16)&0xF;
}

static __inline__ int route4_hash_iif(int iif)
{
	return 16 + ((iif>>16)&0xF);
}

static __inline__ int route4_hash_wild(void)
{
	return 32;
}
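/*
 * The classifier expects dst->tclassid to carry the destination realm in
 * its low 16 bits and the source realm in the upper 16 bits.  The helpers
 * above pick the "to" bucket from the low 8 bits of the tag, the FROM
 * chain (0-15) from bits 16-19, the IIF chains (16-31) from the incoming
 * interface index, and chain 32 for wildcard-from filters.
 */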

#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
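/*
 * ROUTE4_APPLY_RESULT() is only expanded inside the lookup loops of
 * route4_classify() below and relies on the locals there (head, id, iif,
 * res, dont_cache).  A negative verdict from the attached extensions sets
 * dont_cache and continues with the next candidate filter instead of
 * caching the entry in the fastmap.
 */
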
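/*
 * Lookup order: the per-(id, iif) fastmap first, then the "to" bucket with
 * its FROM, IIF and wildcard chains, and finally the wildcard "to" bucket
 * (table[256]).  If no table has been attached yet, fall back to
 * interpreting the route tag directly as a class ID.
 */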
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	if ((dst = skb->dst) == NULL)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = ((struct rtable*)dst)->fl.iif;

	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();

	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}

static inline u32 to_hash(u32 id)
{
	u32 h = id&0xFF;
	if (id&0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id&0xF;
	}
	return 16 + (id&0xF);
}
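/*
 * Handle layout, with a worked example: "to 3 from 7" yields
 * handle = (7 << 16) | 3 = 0x00070003, so to_hash() selects bucket 3 and
 * from_hash(handle >> 16) selects chain 7.  A "fromdev" filter sets bit 15
 * of the upper half (iif | 0x8000) and lands in chains 16-31, while an
 * unspecified FROM/IIF part (0xFFFF) lands in the wildcard chain 32.
 */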

static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle>>16);
	if (h2 > 32)
		return 0;

	if ((b = head->table[h1]) != NULL) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}

static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}

static int route4_init(struct tcf_proto *tp)
{
	return 0;
}

static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}

static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = xchg(&tp->root, NULL);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1=0; h1<=256; h1++) {
		struct route4_bucket *b;

		if ((b = head->table[h1]) != NULL) {
			for (h2=0; h2<=32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}

static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter*)arg;
	unsigned h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q->dev, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip tree */

			for (i=0; i<=32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */
			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}
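/*
 * route4_delete() above frees the bucket itself only once all 33 chains in
 * it are empty, and it flushes the fastmap first because the fastmap may
 * still cache a pointer to the filter being removed.
 */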

static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
	struct route4_filter *f, u32 handle, struct route4_head *head,
	struct nlattr **tb, struct nlattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_CLASSID])
		if (nla_len(tb[TCA_ROUTE4_CLASSID]) < sizeof(u32))
			goto errout;

	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			goto errout;
		if (nla_len(tb[TCA_ROUTE4_TO]) < sizeof(u32))
			goto errout;
		to = *(u32*)nla_data(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			goto errout;
		if (nla_len(tb[TCA_ROUTE4_FROM]) < sizeof(u32))
			goto errout;
		id = *(u32*)nla_data(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		if (nla_len(tb[TCA_ROUTE4_IIF]) < sizeof(u32))
			goto errout;
		id = *(u32*)nla_data(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);
		err = -EEXIST;
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = *(u32*)nla_data(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
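/*
 * route4_set_parms() derives the filter handle from the TO/FROM/IIF
 * attributes; a handle supplied by the user for a new filter must be
 * consistent with the derived value or the request fails with -EINVAL.
 */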

static int route4_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct nlattr **tca,
		       unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, NULL) < 0)
		return -EINVAL;

	if ((f = (struct route4_filter*)*arg) != NULL) {
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
			tca[TCA_RATE], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	err = -ENOBUFS;
	if (head == NULL) {
		head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;

	err = route4_set_parms(tp, base, f, handle, head, tb,
		tca[TCA_RATE], 1);
	if (err < 0)
		goto errout;

reinsert:
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	if (old_handle && f->handle != old_handle) {
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		if ((b = head->table[th]) != NULL) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q->dev, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}
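/*
 * route4_change() keeps each chain sorted by handle when (re)inserting; if
 * an existing filter ends up with a different handle, the old link is
 * removed in the same tcf_tree_lock() section that publishes the new one,
 * and the fastmap is flushed afterwards.
 */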

static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter*)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nla;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nla = (struct nlattr*)b;
	NLA_PUT(skb, TCA_OPTIONS, 0, NULL);

	if (!(f->handle&0x8000)) {
		id = f->id&0xFF;
		NLA_PUT(skb, TCA_ROUTE4_TO, sizeof(id), &id);
	}
	if (f->handle&0x80000000) {
		if ((f->handle>>16) != 0xFFFF)
			NLA_PUT(skb, TCA_ROUTE4_IIF, sizeof(f->iif), &f->iif);
	} else {
		id = f->id>>16;
		NLA_PUT(skb, TCA_ROUTE4_FROM, sizeof(id), &id);
	}
	if (f->res.classid)
		NLA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	nla->nla_len = skb_tail_pointer(skb) - b;

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
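/*
 * route4_dump() reconstructs the TO/FROM/IIF attributes from the handle
 * and id bits and builds the nested TCA_OPTIONS attribute by hand, fixing
 * up nla_len once all options have been appended.
 */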

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");