/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */


struct tcindex_filter_result {
	struct tcf_exts exts;
	struct tcf_result res;
	struct rcu_work rwork;
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	struct rcu_work rwork;
};


struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; */
	struct tcf_proto *tp;
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	struct rcu_work rwork;
};
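
/*
 * Two lookup schemes hide behind one helper: "perfect" is an array
 * indexed directly by the key, used when the key space is small;
 * otherwise "h" is a chained hash table keyed by (key % hash).
 * Readers traverse either one under RCU, so updates build a new
 * tcindex_data and publish it with rcu_assign_pointer() instead of
 * modifying the live structure.
 */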

static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_has_actions(&r->exts) || r->res.classid;
}

static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}


static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}
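
/*
 * Worked example (illustrative values): with mask 0xf0 and shift 4,
 * tc_index 0x2a yields key (0x2a & 0xf0) >> 4 = 2. On a lookup miss
 * with fall_through set, the packet maps to class <qdisc major>:2;
 * with fall_through clear, classification fails (-1).
 */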


static void *tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return NULL;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? r : NULL;
}

static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;

	rcu_assign_pointer(tp->root, p);
	return 0;
}
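
/*
 * Defaults: match all 16 bits of tc_index (mask 0xffff, shift 0),
 * DEFAULT_HASH_SIZE buckets until configured otherwise, and fall
 * through to the algorithmic classid mapping on lookup misses.
 */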

static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
{
	tcf_exts_destroy(&r->exts);
	tcf_exts_put_net(&r->exts);
}

static void tcindex_destroy_rexts_work(struct work_struct *work)
{
	struct tcindex_filter_result *r;

	r = container_of(to_rcu_work(work),
			 struct tcindex_filter_result,
			 rwork);
	rtnl_lock();
	__tcindex_destroy_rexts(r);
	rtnl_unlock();
}

static void __tcindex_destroy_fexts(struct tcindex_filter *f)
{
	tcf_exts_destroy(&f->result.exts);
	tcf_exts_put_net(&f->result.exts);
	kfree(f);
}

static void tcindex_destroy_fexts_work(struct work_struct *work)
{
	struct tcindex_filter *f = container_of(to_rcu_work(work),
						struct tcindex_filter,
						rwork);

	rtnl_lock();
	__tcindex_destroy_fexts(f);
	rtnl_unlock();
}
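
/*
 * Teardown happens in two stages: the entry is unlinked under RTNL,
 * then its extensions are destroyed from a workqueue after an RCU
 * grace period (tcf_queue_work), since readers may still hold
 * references. The __tcindex_destroy_*() variants run immediately only
 * when tcf_exts_get_net() fails, i.e. the netns is already going away.
 */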

static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;

		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (f) {
		if (tcf_exts_get_net(&f->result.exts))
			tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
		else
			__tcindex_destroy_fexts(f);
	} else {
		if (tcf_exts_get_net(&r->exts))
			tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
		else
			__tcindex_destroy_rexts(r);
	}

	*last = false;
	return 0;
}
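
/*
 * Note that *last is unconditionally false, so deleting the final
 * filter never auto-destroys the tcf_proto here; a perfect-hash entry
 * stays in the array, with only its class binding and extensions
 * released.
 */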

static void tcindex_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	kfree(p->perfect);
	kfree(p->h);
	kfree(p);
}

static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}
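
/*
 * A perfect hash is only valid if the table can be indexed directly by
 * every reachable key, i.e. hash > max key == mask >> shift.
 */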

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

static int tcindex_filter_result_init(struct tcindex_filter_result *r,
				      struct net *net)
{
	memset(r, 0, sizeof(*r));
	return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
			     TCA_TCINDEX_POLICE);
}

static void tcindex_partial_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	kfree(p->perfect);
	kfree(p);
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
	int i;

	for (i = 0; i < cp->hash; i++)
		tcf_exts_destroy(&cp->perfect[i].exts);
	kfree(cp->perfect);
}

static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
{
	int i, err = 0;

	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
			      GFP_KERNEL);
	if (!cp->perfect)
		return -ENOMEM;

	for (i = 0; i < cp->hash; i++) {
		err = tcf_exts_init(&cp->perfect[i].exts, net,
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
	}

	return 0;

errout:
	tcindex_free_perfect_hash(cp);
	return err;
}

static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, bool ovr)
{
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_data *cp = NULL, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	struct tcf_result cr = {};
	int err, balloc = 0;
	struct tcf_exts e;

	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;
	cp->tp = tp;

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT])
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}
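
	/*
	 * E.g. (illustrative values) mask 0x3f, shift 0: max key 63 is
	 * below PERFECT_HASH_THRESHOLD, so hash becomes 64 and a perfect
	 * hash is allocated further down.
	 */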

	if (p->perfect) {
		int i;

		if (tcindex_alloc_perfect_hash(net, cp) < 0)
			goto errout;
		for (i = 0; i < min(cp->hash, p->hash); i++)
			cp->perfect[i].res = p->perfect[i].res;
		balloc = 1;
	}
	cp->h = p->h;

	err = tcindex_filter_result_init(&new_filter_result, net);
	if (err < 0)
		goto errout_alloc;
	if (old_r)
		cr = r->res;

	err = -EBUSY;

	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;


	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			if (tcindex_alloc_perfect_hash(net, cp) < 0)
				goto errout_alloc;
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);

			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}
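
	/*
	 * balloc records what this call allocated so errout_alloc can
	 * free exactly that: 1 == perfect hash, 2 == imperfect table.
	 */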

	if (cp->perfect)
		r = cp->perfect + handle;
	else
		r = tcindex_lookup(cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		f->next = NULL;
		err = tcindex_filter_result_init(&f->result, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr, base);
	}

	if (old_r && old_r != r) {
		err = tcindex_filter_result_init(old_r, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	oldp = p;
	r->res = cr;
	tcf_exts_change(&r->exts, &e);

	rcu_assign_pointer(tp->root, cp);

	if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		f->result.res = r->res;
		tcf_exts_change(&f->result.exts, &r->exts);

		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
			; /* nothing */

		rcu_assign_pointer(*fp, f);
	} else {
		tcf_exts_destroy(&new_filter_result.exts);
	}

	if (oldp)
		tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
	return 0;

errout_alloc:
	if (balloc == 1)
		tcindex_free_perfect_hash(cp);
	else if (balloc == 2)
		kfree(cp->h);
	tcf_exts_destroy(&new_filter_result.exts);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}
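
/*
 * Update scheme in short: copy the head p into cp, apply the new
 * attributes, publish cp via rcu_assign_pointer(tp->root, cp), and
 * queue the old head for freeing after a grace period. The imperfect
 * hash table is shared between old and new heads (cp->h = p->h); only
 * a perfect hash gets reallocated, with its results copied over.
 */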

static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, void **arg, bool ovr)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
		 "p %p,r %p,*arg %p\n",
		 tp, handle, tca, arg, opt, p, r, arg ? *arg : NULL);

	if (!opt)
		return 0;

	err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy, NULL);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], ovr);
}

static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, p->perfect + i, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, &f->result, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}

static void tcindex_destroy(struct tcf_proto *tp)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	int i;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);

	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			struct tcindex_filter_result *r = p->perfect + i;

			tcf_unbind_filter(tp, &r->res);
			if (tcf_exts_get_net(&r->exts))
				tcf_queue_work(&r->rwork,
					       tcindex_destroy_rexts_work);
			else
				__tcindex_destroy_rexts(r);
		}
	}

	for (i = 0; p->h && i < p->hash; i++) {
		struct tcindex_filter *f, *next;
		bool last;

		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			tcindex_delete(tp, &f->result, &last);
		}
	}

	tcf_queue_work(&p->rwork, tcindex_destroy_work);
}
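
/*
 * tcindex_destroy_work() merely kfree()s the tables: every entry's
 * extensions were already queued for destruction above (directly for
 * perfect-hash slots, via tcindex_delete() for chained filters), each
 * deferred past an RCU grace period by tcf_queue_work().
 */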


static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *t)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = fh;
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
		 tp, fh, skb, t, p, r);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct tcindex_filter_result *r = fh;

	if (r && r->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &r->res, base);
		else
			__tcf_unbind_filter(q, &r->res);
	}
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		= "tcindex",
	.classify	= tcindex_classify,
	.init		= tcindex_init,
	.destroy	= tcindex_destroy,
	.get		= tcindex_get,
	.change		= tcindex_change,
	.delete		= tcindex_delete,
	.walk		= tcindex_walk,
	.dump		= tcindex_dump,
	.bind_class	= tcindex_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");
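
/*
 * Illustrative userspace usage (hypothetical setup; option names per
 * tc-tcindex(8), verify against your iproute2 version). The classic
 * DiffServ arrangement has dsmark copy the DSCP into skb->tc_index and
 * tcindex map it to a class:
 *
 *	tc qdisc add dev eth0 handle 1:0 root dsmark indices 64 \
 *		set_tc_index
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		tcindex mask 0xfc shift 2
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		handle 46 tcindex classid 1:46
 */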