/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * The filters are packed into hash tables of key nodes
 * with a set of 32bit key/mask pairs at every node.
 * Nodes reference next level hash tables etc.
 *
 * This scheme is the best universal classifier I managed to
 * invent; it is not super-fast, but it is not slow (provided you
 * program it correctly), and general enough.  And its relative
 * speed grows as the number of rules becomes larger.
 *
 * It seems that it represents the best middle point between
 * speed and manageability both by human and by machine.
 *
 * It is especially useful for link sharing combined with QoS;
 * pure RSVP doesn't need such a general approach and can use
 * much simpler (and faster) schemes along the lines of cls_rsvp.c.
 *
 * JHS: We should remove the CONFIG_NET_CLS_IND from here
 * eventually when the meta match extension is made available
 *
 * nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
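
/*
 * A u32 filter handle is a 32bit value packed as htid:bucket:node
 * (12:8:12 bits); tc prints it as e.g. "801:0:2" for 0x80100002.
 * TC_U32_HTID() extracts the hash table id (0x80100000 here),
 * TC_U32_HASH() the bucket (0) and TC_U32_NODE() the key node id (2);
 * TC_U32_KEY() keeps bucket and node together.  A handle whose key
 * part is zero names the table itself.
 */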

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/idr.h>

struct tc_u_knode {
	struct tc_u_knode __rcu *next;
	u32 handle;
	struct tc_u_hnode __rcu *ht_up;
	struct tcf_exts exts;
#ifdef CONFIG_NET_CLS_IND
	int ifindex;
#endif
	u8 fshift;
	struct tcf_result res;
	struct tc_u_hnode __rcu *ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt __percpu *pf;
#endif
	u32 flags;
#ifdef CONFIG_CLS_U32_MARK
	u32 val;
	u32 mask;
	u32 __percpu *pcpu_success;
#endif
	struct tcf_proto *tp;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
	/* The 'sel' field MUST be the last field in structure to allow for
	 * tc_u32_keys allocated at end of structure.
	 */
	struct tc_u32_sel sel;
};

struct tc_u_hnode {
	struct tc_u_hnode __rcu *next;
	u32 handle;
	u32 prio;
	struct tc_u_common *tp_c;
	int refcnt;
	unsigned int divisor;
	struct idr handle_idr;
	struct rcu_head rcu;
	/* The 'ht' field MUST be the last field in structure to allow for
	 * more entries allocated at end of structure.
	 */
	struct tc_u_knode __rcu *ht[1];
};

struct tc_u_common {
	struct tc_u_hnode __rcu *hlist;
	struct tcf_block *block;
	int refcnt;
	struct idr handle_idr;
	struct hlist_node hnode;
	struct rcu_head rcu;
};
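
/*
 * All u32 classifier instances attached to the same tcf_block share a
 * single tc_u_common, so hash tables created under one tcf_proto can
 * be linked to from another; tc_u_common_find() below locates it by
 * hashing the block pointer.
 */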
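
/*
 * Fold a 32bit word fetched from the packet down to a bucket index:
 * mask it with sel->hmask and shift the surviving bits to the bottom.
 * fshift is precomputed as ffs(ntohl(hmask)) - 1, so e.g. hmask
 * 0x0000ff00 gives fshift 8 and a fetched word 0x12345678 folds to
 * 0x56, which the caller then masks with the table divisor.
 */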
static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}
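
/*
 * Lookup walks the hash tables starting at tp->root: each key node
 * compares its key/mask pairs against words fetched from the packet,
 * a full match descends via ht_down into the linked table, and the
 * path is remembered on a small stack so that when a branch dead-ends
 * matching resumes at the parent table (the POP below).  A node with
 * TC_U32_TERMINAL set delivers its result and ends the walk.
 */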
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		__this_cpu_inc(n->pf->rcnt);
		j = 0;
#endif

		if (tc_skip_sw(n->flags)) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mask) != n->val) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		} else {
			__this_cpu_inc(*n->pcpu_success);
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = rcu_dereference_bh(n->next);
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			__this_cpu_inc(n->pf->kcnts[j]);
			j++;
#endif
		}

		ht = rcu_dereference_bh(n->ht_down);
		if (!ht) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				__this_cpu_inc(n->pf->rhit);
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}

				return r;
			}
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = rcu_dereference_bh(n->ht_down);
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = rcu_dereference_bh(n->ht_up);
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}

static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (ht->handle == handle)
			break;

	return ht;
}

static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = rtnl_dereference(ht->ht[sel]);
	     n;
	     n = rtnl_dereference(n->next))
		if (n->handle == handle)
			break;
out:
	return n;
}

static void *u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return NULL;

	if (TC_U32_KEY(handle) == 0)
		return ht;

	return u32_lookup_key(ht, handle);
}
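
/*
 * Allocate a new hash table id.  The idr returns an index in
 * [1, 0x7FF); or'ing in 0x800 and shifting by 20 forms the htid field
 * of a handle, e.g. index 1 yields htid 0x80100000 ("801:" in tc
 * notation), so the root table's 0x80000000 is never handed out.
 */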
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
	unsigned long idr_index;
	int err;

	/* This is only used inside the rtnl lock, so it is safe to
	 * increment without read-copy-update semantics.
	 */
	err = idr_alloc_ext(&tp_c->handle_idr, ptr, &idr_index,
			    1, 0x7FF, GFP_KERNEL);
	if (err)
		return 0;
	return (u32)(idr_index | 0x800) << 20;
}

static struct hlist_head *tc_u_common_hash;

#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)

static unsigned int tc_u_hash(const struct tcf_proto *tp)
{
	return hash_ptr(tp->chain->block, U32_HASH_SHIFT);
}

static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
{
	struct tc_u_common *tc;
	unsigned int h;

	h = tc_u_hash(tp);
	hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) {
		if (tc->block == tp->chain->block)
			return tc;
	}
	return NULL;
}

static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;
	unsigned int h;

	tp_c = tc_u_common_find(tp);

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
	root_ht->prio = tp->prio;
	idr_init(&root_ht->handle_idr);

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->block = tp->chain->block;
		INIT_HLIST_NODE(&tp_c->hnode);
		idr_init(&tp_c->handle_idr);

		h = tc_u_hash(tp);
		hlist_add_head(&tp_c->hnode, &tc_u_common_hash[h]);
	}

	tp_c->refcnt++;
	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);
	root_ht->tp_c = tp_c;

	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
			   bool free_pf)
{
	tcf_exts_destroy(&n->exts);
	tcf_exts_put_net(&n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
	if (free_pf)
		free_percpu(n->pcpu_success);
#endif
	kfree(n);
	return 0;
}

/* u32_delete_key_rcu should be called when freeing a copied
 * version of a tc_u_knode obtained from u32_init_knode(). When
 * copies are obtained from u32_init_knode() the statistics are
 * shared between the old and new copies to allow readers to
 * continue to update the statistics during the copy. To support
 * this the u32_delete_key_rcu variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);

	rtnl_lock();
	u32_destroy_key(key->tp, key, false);
	rtnl_unlock();
}

static void u32_delete_key_rcu(struct rcu_head *rcu)
{
	struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);

	INIT_WORK(&key->work, u32_delete_key_work);
	tcf_queue_work(&key->work);
}

/* u32_delete_key_freepf_rcu is the rcu callback variant
 * that frees the entire structure including the statistics
 * percpu variables. Only use this if the key is not a copy
 * returned by u32_init_knode(). See u32_delete_key_rcu()
 * for the variant that should be used with keys returned from
 * u32_init_knode().
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);

	rtnl_lock();
	u32_destroy_key(key->tp, key, true);
	rtnl_unlock();
}

static void u32_delete_key_freepf_rcu(struct rcu_head *rcu)
{
	struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);

	INIT_WORK(&key->work, u32_delete_key_freepf_work);
	tcf_queue_work(&key->work);
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_knode __rcu **kp;
	struct tc_u_knode *pkp;
	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

	if (ht) {
		kp = &ht->ht[TC_U32_HASH(key->handle)];
		for (pkp = rtnl_dereference(*kp); pkp;
		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
			if (pkp == key) {
				RCU_INIT_POINTER(*kp, key->next);

				tcf_unbind_filter(tp, &key->res);
				tcf_exts_get_net(&key->exts);
				call_rcu(&key->rcu, u32_delete_key_freepf_rcu);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}

static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp);
	cls_u32.command = TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
}

static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
				u32 flags)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	bool offloaded = false;
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp);
	cls_u32.command = TC_CLSU32_NEW_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
	if (err < 0) {
		u32_clear_hw_hnode(tp, h);
		return err;
	} else if (err > 0) {
		offloaded = true;
	}

	if (skip_sw && !offloaded)
		return -EINVAL;

	return 0;
}

static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp);
	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = handle;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
}
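
/*
 * Offload a key node through the block callbacks.  A positive return
 * from tc_setup_cb_call() means at least one driver accepted the
 * rule, which is mandatory when skip_sw is set.
 */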
static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				u32 flags)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp);
	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
	cls_u32.knode.handle = n->handle;
	cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
	cls_u32.knode.val = n->val;
	cls_u32.knode.mask = n->mask;
#else
	cls_u32.knode.val = 0;
	cls_u32.knode.mask = 0;
#endif
	cls_u32.knode.sel = &n->sel;
	cls_u32.knode.exts = &n->exts;
	if (n->ht_down)
		cls_u32.knode.link_handle = n->ht_down->handle;

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
	if (err < 0) {
		u32_remove_hw_knode(tp, n->handle);
		return err;
	} else if (err > 0) {
		n->flags |= TCA_CLS_FLAGS_IN_HW;
	}

	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
			RCU_INIT_POINTER(ht->ht[h],
					 rtnl_dereference(n->next));
			tcf_unbind_filter(tp, &n->res);
			u32_remove_hw_knode(tp, n->handle);
			idr_remove_ext(&ht->handle_idr, n->handle);
			if (tcf_exts_get_net(&n->exts))
				call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
			else
				u32_destroy_key(n->tp, n, true);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode __rcu **hn;
	struct tc_u_hnode *phn;

	WARN_ON(ht->refcnt);

	u32_clear_hnode(tp, ht);

	hn = &tp_c->hlist;
	for (phn = rtnl_dereference(*hn);
	     phn;
	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
		if (phn == ht) {
			u32_clear_hw_hnode(tp, ht);
			idr_destroy(&ht->handle_idr);
			idr_remove_ext(&tp_c->handle_idr, ht->handle);
			RCU_INIT_POINTER(*hn, ht->next);
			kfree_rcu(ht, rcu);
			return 0;
		}
	}

	return -ENOENT;
}

static bool ht_empty(struct tc_u_hnode *ht)
{
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++)
		if (rcu_access_pointer(ht->ht[h]))
			return false;

	return true;
}

static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		hlist_del(&tp_c->hnode);

		for (ht = rtnl_dereference(tp_c->hlist);
		     ht;
		     ht = rtnl_dereference(ht->next)) {
			ht->refcnt--;
			u32_clear_hnode(tp, ht);
		}

		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
			RCU_INIT_POINTER(tp_c->hlist, ht->next);
			kfree_rcu(ht, rcu);
		}

		idr_destroy(&tp_c->handle_idr);
		kfree(tp_c);
	}

	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct tc_u_hnode *ht = arg;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
	struct tc_u_common *tp_c = tp->data;
	int ret = 0;

	if (ht == NULL)
		goto out;

	if (TC_U32_KEY(ht->handle)) {
		u32_remove_hw_knode(tp, ht->handle);
		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
		goto out;
	}

	if (root_ht == ht)
		return -EINVAL;

	if (ht->refcnt == 1) {
		ht->refcnt--;
		u32_destroy_hnode(tp, ht);
	} else {
		return -EBUSY;
	}

out:
	*last = true;
	if (root_ht) {
		if (root_ht->refcnt > 1) {
			*last = false;
			goto ret;
		}
		if (root_ht->refcnt == 1) {
			if (!ht_empty(root_ht)) {
				*last = false;
				goto ret;
			}
		}
	}

	if (tp_c->refcnt > 1) {
		*last = false;
		goto ret;
	}

	if (tp_c->refcnt == 1) {
		struct tc_u_hnode *ht;

		for (ht = rtnl_dereference(tp_c->hlist);
		     ht;
		     ht = rtnl_dereference(ht->next))
			if (!ht_empty(ht)) {
				*last = false;
				break;
			}
	}

ret:
	return ret;
}
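
/*
 * Pick a node id for a new key in table htid.  Automatically chosen
 * ids prefer the upper range [htid | 0x800, htid | 0xFFF] so they stay
 * clear of low ids typically assigned by hand; failing that, any free
 * id above htid is used before giving up and returning the maximum.
 */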
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
	unsigned long idr_index;
	u32 start = htid | 0x800;
	u32 max = htid | 0xFFF;
	u32 min = htid;

	if (idr_alloc_ext(&ht->handle_idr, NULL, &idr_index,
			  start, max + 1, GFP_KERNEL)) {
		if (idr_alloc_ext(&ht->handle_idr, NULL, &idr_index,
				  min + 1, max + 1, GFP_KERNEL))
			return max;
	}

	return (u32)idr_index;
}

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID] = { .type = NLA_U32 },
	[TCA_U32_HASH] = { .type = NLA_U32 },
	[TCA_U32_LINK] = { .type = NLA_U32 },
	[TCA_U32_DIVISOR] = { .type = NLA_U32 },
	[TCA_U32_SEL] = { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV] = { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK] = { .len = sizeof(struct tc_u32_mark) },
	[TCA_U32_FLAGS] = { .type = NLA_U32 },
};
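
/*
 * For reference, a selector created by (roughly)
 *
 *	tc filter add dev eth0 parent 1: prio 1 u32 \
 *		match ip dst 192.168.0.0/24 flowid 1:1
 *
 * arrives as a TCA_U32_SEL carrying one key with val/mask in network
 * byte order plus a TCA_U32_CLASSID; TCA_U32_LINK instead points the
 * node at another hash table.
 */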
static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base, struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, bool ovr)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle))
			return -EINVAL;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				return -EINVAL;
			ht_down->refcnt++;
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		int ret;

		ret = tcf_change_indev(net, tb[TCA_U32_INDEV]);
		if (ret < 0)
			return -EINVAL;
		n->ifindex = ret;
	}
#endif
	return 0;
}

static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced; if this is
	 * not the case then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	idr_replace_ext(&ht->handle_idr, n, n->handle);
	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}

static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_knode *new;
	struct tc_u32_sel *s = &n->sel;

	new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
		      GFP_KERNEL);

	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

#ifdef CONFIG_NET_CLS_IND
	new->ifindex = n->ifindex;
#endif
	new->fshift = n->fshift;
	new->res = n->res;
	new->flags = n->flags;
	RCU_INIT_POINTER(new->ht_down, n->ht_down);

	/* bump reference count as long as we hold pointer to structure */
	if (new->ht_down)
		new->ht_down->refcnt++;

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them intact. When the node is later destroyed
	 * a special destroy call must be made so as not to free the
	 * pf memory.
	 */
	new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
	new->val = n->val;
	new->mask = n->mask;
	/* Similarly, the success statistics must be moved as pointers */
	new->pcpu_success = n->pcpu_success;
#endif
	new->tp = tp;
	memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));

	if (tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE)) {
		kfree(new);
		return NULL;
	}

	return new;
}
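
/*
 * u32_change covers three cases: replacing an existing key node (a
 * copy is installed and the old node is freed via RCU so readers keep
 * consistent statistics), creating a new hash table when
 * TCA_U32_DIVISOR is present, and inserting a new key node into the
 * table selected by TCA_U32_HASH (the root table by default).
 */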
static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca, void **arg, bool ovr)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid, flags = 0;
	int err;
#ifdef CONFIG_CLS_U32_PERF
	size_t size;
#endif

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_U32_FLAGS]) {
		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	n = *arg;
	if (n) {
		struct tc_u_knode *new;

		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		if (n->flags != flags)
			return -EINVAL;

		new = u32_init_knode(tp, n);
		if (!new)
			return -ENOMEM;

		err = u32_set_parms(net, tp, base,
				    rtnl_dereference(n->ht_up), new, tb,
				    tca[TCA_RATE], ovr);

		if (err) {
			u32_destroy_key(tp, new, false);
			return err;
		}

		err = u32_replace_hw_knode(tp, new, flags);
		if (err) {
			u32_destroy_key(tp, new, false);
			return err;
		}

		if (!tc_in_hw(new->flags))
			new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		u32_replace_knode(tp, tp_c, new);
		tcf_unbind_filter(tp, &n->res);
		tcf_exts_get_net(&n->exts);
		call_rcu(&n->rcu, u32_delete_key_rcu);
		return 0;
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		if (handle == 0) {
			handle = gen_new_htid(tp->data, ht);
			if (handle == 0) {
				kfree(ht);
				return -ENOMEM;
			}
		} else {
			err = idr_alloc_ext(&tp_c->handle_idr, ht, NULL,
					    handle, handle + 1, GFP_KERNEL);
			if (err) {
				kfree(ht);
				return err;
			}
		}
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		idr_init(&ht->handle_idr);

		err = u32_replace_hw_hnode(tp, ht, flags);
		if (err) {
			idr_remove_ext(&tp_c->handle_idr, handle);
			kfree(ht);
			return err;
		}

		RCU_INIT_POINTER(ht->next, tp_c->hlist);
		rcu_assign_pointer(tp_c->hlist, ht);
		*arg = ht;

		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = rtnl_dereference(tp->root);
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = rtnl_dereference(tp->root);
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
		err = idr_alloc_ext(&ht->handle_idr, NULL, NULL,
				    handle, handle + 1,
				    GFP_KERNEL);
		if (err)
			return err;
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL) {
		err = -EINVAL;
		goto erridr;
	}

	s = nla_data(tb[TCA_U32_SEL]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL) {
		err = -ENOBUFS;
		goto erridr;
	}

#ifdef CONFIG_CLS_U32_PERF
	size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
	n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
	if (!n->pf) {
		err = -ENOBUFS;
		goto errfree;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	RCU_INIT_POINTER(n->ht_up, ht);
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	n->flags = flags;
	n->tp = tp;

	err = tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
	if (err < 0)
		goto errout;

#ifdef CONFIG_CLS_U32_MARK
	n->pcpu_success = alloc_percpu(u32);
	if (!n->pcpu_success) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		n->val = mark->val;
		n->mask = mark->mask;
	}
#endif

	err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		err = u32_replace_hw_knode(tp, n, flags);
		if (err)
			goto errhw;

		if (!tc_in_hw(n->flags))
			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		ins = &ht->ht[TC_U32_HASH(handle)];
		for (pins = rtnl_dereference(*ins); pins;
		     ins = &pins->next, pins = rtnl_dereference(*ins))
			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
				break;

		RCU_INIT_POINTER(n->next, pins);
		rcu_assign_pointer(*ins, n);
		*arg = n;
		return 0;
	}

errhw:
#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
#endif

errout:
	tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
errfree:
	free_percpu(n->pf);
#endif
	kfree(n);
erridr:
	idr_remove_ext(&ht->handle_idr, handle);
	return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

static void u32_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct tc_u_knode *n = fh;

	if (n && n->res.classid == classid)
		n->res.class = cl;
}
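
/*
 * Dump a hash table (key part of the handle is zero) as just its
 * divisor, or a key node as its selector, links and flags; per-cpu
 * hit counters are summed over all possible CPUs into a single
 * tc_u32_pcnt for userspace.
 */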
static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;
		int cpu;
#endif

		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			u32 htid = n->handle & 0xFFFFF000;

			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->val || n->mask)) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (n->ifindex) {
			struct net_device *dev;

			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#endif
#ifdef CONFIG_CLS_U32_PERF
		gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
			      n->sel.nkeys * sizeof(u64),
			      GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		if (nla_put_64bit(skb, TCA_U32_PCNT,
				  sizeof(struct tc_u32_pcnt) +
				  n->sel.nkeys * sizeof(u64),
				  gpf, TCA_U32_PAD)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind = "u32",
	.classify = u32_classify,
	.init = u32_init,
	.destroy = u32_destroy,
	.get = u32_get,
	.change = u32_change,
	.delete = u32_delete,
	.walk = u32_walk,
	.dump = u32_dump,
	.bind_class = u32_bind_class,
	.owner = THIS_MODULE,
};

static int __init init_u32(void)
{
	int i, ret;

	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
					  sizeof(struct hlist_head),
					  GFP_KERNEL);
	if (!tc_u_common_hash)
		return -ENOMEM;

	for (i = 0; i < U32_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&tc_u_common_hash[i]);

	ret = register_tcf_proto_ops(&cls_u32_ops);
	if (ret)
		kvfree(tc_u_common_hash);
	return ret;
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
	kvfree(tc_u_common_hash);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");