/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough. And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
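/* Illustrative usage sketch (editor's note, not part of the upstream file):
 * from user space this classifier is normally programmed through tc(8),
 * for example:
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 u32 \
 *		match ip dst 192.0.2.1/32 flowid 1:10
 *
 * The device name, priority, address and class id above are arbitrary
 * example values; they only show how a key/mask pair ends up in a
 * tc_u32_sel handled by the code below.
 */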

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/idr.h>

struct tc_u_knode {
	struct tc_u_knode __rcu	*next;
	u32			handle;
	struct tc_u_hnode __rcu	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	int			ifindex;
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode __rcu	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt __percpu *pf;
#endif
	u32			flags;
#ifdef CONFIG_CLS_U32_MARK
	u32			val;
	u32			mask;
	u32 __percpu		*pcpu_success;
#endif
	struct tcf_proto	*tp;
	union {
		struct work_struct	work;
		struct rcu_head		rcu;
	};
	/* The 'sel' field MUST be the last field in structure to allow for
	 * tc_u32_keys allocated at end of structure.
	 */
	struct tc_u32_sel	sel;
};

struct tc_u_hnode {
	struct tc_u_hnode __rcu	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned int		divisor;
	struct idr		handle_idr;
	struct rcu_head		rcu;
	/* The 'ht' field MUST be the last field in structure to allow for
	 * more entries allocated at end of structure.
	 */
	struct tc_u_knode __rcu	*ht[1];
};

struct tc_u_common {
	struct tc_u_hnode __rcu	*hlist;
	struct tcf_block	*block;
	int			refcnt;
	struct idr		handle_idr;
	struct hlist_node	hnode;
	struct rcu_head		rcu;
};
104
cc7ec456
ED
105static inline unsigned int u32_hash_fold(__be32 key,
106 const struct tc_u32_sel *sel,
107 u8 fshift)
1da177e4 108{
cc7ec456 109 unsigned int h = ntohl(key & sel->hmask) >> fshift;
1da177e4
LT
110
111 return h;
112}
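/* Worked example (editor's note, not part of the upstream file): with
 * sel->hmask == htonl(0x0000ff00) the caller computes fshift = 8, the
 * position of the lowest set bit of the host-order mask.  For a packet
 * word key == htonl(0x0a0b0c0d):
 *
 *	ntohl(key & sel->hmask) >> fshift == 0x00000c00 >> 8 == 0x0c
 *
 * so the masked byte selects the bucket; u32_classify() then bounds the
 * result with "ht->divisor & ..." before indexing ht->ht[].
 */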
113
5a7a5555
JHS
114static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
115 struct tcf_result *res)
1da177e4
LT
116{
117 struct {
118 struct tc_u_knode *knode;
fbc2e7d9 119 unsigned int off;
1da177e4
LT
120 } stack[TC_U32_MAXDEPTH];
121
1ce87720 122 struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
fbc2e7d9 123 unsigned int off = skb_network_offset(skb);
1da177e4
LT
124 struct tc_u_knode *n;
125 int sdepth = 0;
126 int off2 = 0;
127 int sel = 0;
128#ifdef CONFIG_CLS_U32_PERF
129 int j;
130#endif
131 int i, r;
132
133next_ht:
1ce87720 134 n = rcu_dereference_bh(ht->ht[sel]);
1da177e4
LT
135
136next_knode:
137 if (n) {
138 struct tc_u32_key *key = n->sel.keys;
139
140#ifdef CONFIG_CLS_U32_PERF
459d5f62 141 __this_cpu_inc(n->pf->rcnt);
1da177e4
LT
142 j = 0;
143#endif
144
d34e3e18
SS
145 if (tc_skip_sw(n->flags)) {
146 n = rcu_dereference_bh(n->next);
147 goto next_knode;
148 }
149
1da177e4 150#ifdef CONFIG_CLS_U32_MARK
459d5f62 151 if ((skb->mark & n->mask) != n->val) {
1ce87720 152 n = rcu_dereference_bh(n->next);
1da177e4
LT
153 goto next_knode;
154 } else {
459d5f62 155 __this_cpu_inc(*n->pcpu_success);
1da177e4
LT
156 }
157#endif
158
cc7ec456 159 for (i = n->sel.nkeys; i > 0; i--, key++) {
66d50d25 160 int toff = off + key->off + (off2 & key->offmask);
86fce3ba 161 __be32 *data, hdata;
fbc2e7d9 162
4e18b3ed 163 if (skb_headroom(skb) + toff > INT_MAX)
66d50d25 164 goto out;
165
86fce3ba 166 data = skb_header_pointer(skb, toff, 4, &hdata);
fbc2e7d9
CG
167 if (!data)
168 goto out;
169 if ((*data ^ key->val) & key->mask) {
1ce87720 170 n = rcu_dereference_bh(n->next);
1da177e4
LT
171 goto next_knode;
172 }
173#ifdef CONFIG_CLS_U32_PERF
459d5f62 174 __this_cpu_inc(n->pf->kcnts[j]);
1da177e4
LT
175 j++;
176#endif
177 }
1ce87720
JF
178
179 ht = rcu_dereference_bh(n->ht_down);
180 if (!ht) {
1da177e4 181check_terminal:
cc7ec456 182 if (n->sel.flags & TC_U32_TERMINAL) {
1da177e4
LT
183
184 *res = n->res;
185#ifdef CONFIG_NET_CLS_IND
2519a602 186 if (!tcf_match_indev(skb, n->ifindex)) {
1ce87720 187 n = rcu_dereference_bh(n->next);
1da177e4
LT
188 goto next_knode;
189 }
190#endif
191#ifdef CONFIG_CLS_U32_PERF
459d5f62 192 __this_cpu_inc(n->pf->rhit);
1da177e4
LT
193#endif
194 r = tcf_exts_exec(skb, &n->exts, res);
195 if (r < 0) {
1ce87720 196 n = rcu_dereference_bh(n->next);
1da177e4
LT
197 goto next_knode;
198 }
199
200 return r;
201 }
1ce87720 202 n = rcu_dereference_bh(n->next);
1da177e4
LT
203 goto next_knode;
204 }
205
206 /* PUSH */
207 if (sdepth >= TC_U32_MAXDEPTH)
208 goto deadloop;
209 stack[sdepth].knode = n;
fbc2e7d9 210 stack[sdepth].off = off;
1da177e4
LT
211 sdepth++;
212
1ce87720 213 ht = rcu_dereference_bh(n->ht_down);
1da177e4 214 sel = 0;
fbc2e7d9 215 if (ht->divisor) {
86fce3ba 216 __be32 *data, hdata;
fbc2e7d9
CG
217
218 data = skb_header_pointer(skb, off + n->sel.hoff, 4,
86fce3ba 219 &hdata);
fbc2e7d9
CG
220 if (!data)
221 goto out;
222 sel = ht->divisor & u32_hash_fold(*data, &n->sel,
223 n->fshift);
224 }
cc7ec456 225 if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
1da177e4
LT
226 goto next_ht;
227
cc7ec456 228 if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
1da177e4 229 off2 = n->sel.off + 3;
fbc2e7d9 230 if (n->sel.flags & TC_U32_VAROFFSET) {
86fce3ba 231 __be16 *data, hdata;
fbc2e7d9
CG
232
233 data = skb_header_pointer(skb,
234 off + n->sel.offoff,
86fce3ba 235 2, &hdata);
fbc2e7d9
CG
236 if (!data)
237 goto out;
238 off2 += ntohs(n->sel.offmask & *data) >>
239 n->sel.offshift;
240 }
1da177e4
LT
241 off2 &= ~3;
242 }
cc7ec456 243 if (n->sel.flags & TC_U32_EAT) {
fbc2e7d9 244 off += off2;
1da177e4
LT
245 off2 = 0;
246 }
247
fbc2e7d9 248 if (off < skb->len)
1da177e4
LT
249 goto next_ht;
250 }
251
252 /* POP */
253 if (sdepth--) {
254 n = stack[sdepth].knode;
1ce87720 255 ht = rcu_dereference_bh(n->ht_up);
fbc2e7d9 256 off = stack[sdepth].off;
1da177e4
LT
257 goto check_terminal;
258 }
fbc2e7d9 259out:
1da177e4
LT
260 return -1;
261
262deadloop:
e87cc472 263 net_warn_ratelimited("cls_u32: dead loop\n");
1da177e4
LT
264 return -1;
265}

static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (ht->handle == handle)
			break;

	return ht;
}

static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = rtnl_dereference(ht->ht[sel]);
	     n;
	     n = rtnl_dereference(n->next))
		if (n->handle == handle)
			break;
out:
	return n;
}

static void *u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return NULL;

	if (TC_U32_KEY(handle) == 0)
		return ht;

	return u32_lookup_key(ht, handle);
}

static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
	unsigned long idr_index;
	int err;

	/* This is only used inside the rtnl lock, so it is safe to increment
	 * without read-copy-update semantics.
	 */
	err = idr_alloc_ext(&tp_c->handle_idr, ptr, &idr_index,
			    1, 0x7FF, GFP_KERNEL);
	if (err)
		return 0;
	return (u32)(idr_index | 0x800) << 20;
}
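/* Layout note (editor's note, not part of the upstream file): a u32 handle
 * packs three fields into 32 bits -- the hash table id in bits 31..20
 * (masked by TC_U32_HTID()), the bucket in bits 19..12 (TC_U32_HASH())
 * and the node id in bits 11..0 (TC_U32_NODE()).  gen_new_htid() above
 * therefore produces an htid with the 0x800 bit set in the top 12 bits,
 * e.g. 0x80100000 for the first allocation, and gen_new_kid() further
 * down fills in the low 12 bits for individual keys.
 */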

static struct hlist_head *tc_u_common_hash;

#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)

static unsigned int tc_u_hash(const struct tcf_proto *tp)
{
	return hash_ptr(tp->chain->block, U32_HASH_SHIFT);
}

static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
{
	struct tc_u_common *tc;
	unsigned int h;

	h = tc_u_hash(tp);
	hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) {
		if (tc->block == tp->chain->block)
			return tc;
	}
	return NULL;
}

static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;
	unsigned int h;

	tp_c = tc_u_common_find(tp);

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
	root_ht->prio = tp->prio;
	idr_init(&root_ht->handle_idr);

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->block = tp->chain->block;
		INIT_HLIST_NODE(&tp_c->hnode);
		idr_init(&tp_c->handle_idr);

		h = tc_u_hash(tp);
		hlist_add_head(&tp_c->hnode, &tc_u_common_hash[h]);
	}

	tp_c->refcnt++;
	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);
	root_ht->tp_c = tp_c;

	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
			   bool free_pf)
{
	tcf_exts_destroy(&n->exts);
	tcf_exts_put_net(&n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
	if (free_pf)
		free_percpu(n->pcpu_success);
#endif
	kfree(n);
	return 0;
}

/* u32_delete_key_rcu should be called when freeing a copied
 * version of a tc_u_knode obtained from u32_init_knode(). When
 * copies are obtained from u32_init_knode() the statistics are
 * shared between the old and new copies to allow readers to
 * continue to update the statistics during the copy. To support
 * this the u32_delete_key_rcu variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);

	rtnl_lock();
	u32_destroy_key(key->tp, key, false);
	rtnl_unlock();
}

static void u32_delete_key_rcu(struct rcu_head *rcu)
{
	struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);

	INIT_WORK(&key->work, u32_delete_key_work);
	tcf_queue_work(&key->work);
}

/* u32_delete_key_freepf_rcu is the rcu callback variant
 * that frees the entire structure including the statistics
 * percpu variables. Only use this if the key is not a copy
 * returned by u32_init_knode(). See u32_delete_key_rcu()
 * for the variant that should be used with keys returned from
 * u32_init_knode().
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);

	rtnl_lock();
	u32_destroy_key(key->tp, key, true);
	rtnl_unlock();
}

static void u32_delete_key_freepf_rcu(struct rcu_head *rcu)
{
	struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);

	INIT_WORK(&key->work, u32_delete_key_freepf_work);
	tcf_queue_work(&key->work);
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_knode __rcu **kp;
	struct tc_u_knode *pkp;
	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

	if (ht) {
		kp = &ht->ht[TC_U32_HASH(key->handle)];
		for (pkp = rtnl_dereference(*kp); pkp;
		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
			if (pkp == key) {
				RCU_INIT_POINTER(*kp, key->next);

				tcf_unbind_filter(tp, &key->res);
				tcf_exts_get_net(&key->exts);
				call_rcu(&key->rcu, u32_delete_key_freepf_rcu);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}

static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp);
	cls_u32.command = TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
}

static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
				u32 flags)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	bool offloaded = false;
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp);
	cls_u32.command = TC_CLSU32_NEW_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
	if (err < 0) {
		u32_clear_hw_hnode(tp, h);
		return err;
	} else if (err > 0) {
		offloaded = true;
	}

	if (skip_sw && !offloaded)
		return -EINVAL;

	return 0;
}

static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp);
	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = handle;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
}

static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				u32 flags)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp);
	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
	cls_u32.knode.handle = n->handle;
	cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
	cls_u32.knode.val = n->val;
	cls_u32.knode.mask = n->mask;
#else
	cls_u32.knode.val = 0;
	cls_u32.knode.mask = 0;
#endif
	cls_u32.knode.sel = &n->sel;
	cls_u32.knode.exts = &n->exts;
	if (n->ht_down)
		cls_u32.knode.link_handle = n->ht_down->handle;

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
	if (err < 0) {
		u32_remove_hw_knode(tp, n->handle);
		return err;
	} else if (err > 0) {
		n->flags |= TCA_CLS_FLAGS_IN_HW;
	}

	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}
581
a0efb80c 582static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
1da177e4
LT
583{
584 struct tc_u_knode *n;
cc7ec456 585 unsigned int h;
1da177e4 586
cc7ec456 587 for (h = 0; h <= ht->divisor; h++) {
1ce87720
JF
588 while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
589 RCU_INIT_POINTER(ht->ht[h],
590 rtnl_dereference(n->next));
a0efb80c 591 tcf_unbind_filter(tp, &n->res);
a1b7c5fd 592 u32_remove_hw_knode(tp, n->handle);
e7614370 593 idr_remove_ext(&ht->handle_idr, n->handle);
35c55fc1
CW
594 if (tcf_exts_get_net(&n->exts))
595 call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
596 else
597 u32_destroy_key(n->tp, n, true);
1da177e4
LT
598 }
599 }
600}
601
602static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
603{
604 struct tc_u_common *tp_c = tp->data;
1ce87720
JF
605 struct tc_u_hnode __rcu **hn;
606 struct tc_u_hnode *phn;
1da177e4 607
547b792c 608 WARN_ON(ht->refcnt);
1da177e4 609
a0efb80c 610 u32_clear_hnode(tp, ht);
1da177e4 611
1ce87720
JF
612 hn = &tp_c->hlist;
613 for (phn = rtnl_dereference(*hn);
614 phn;
615 hn = &phn->next, phn = rtnl_dereference(*hn)) {
616 if (phn == ht) {
a1b7c5fd 617 u32_clear_hw_hnode(tp, ht);
e7614370
CW
618 idr_destroy(&ht->handle_idr);
619 idr_remove_ext(&tp_c->handle_idr, ht->handle);
1ce87720
JF
620 RCU_INIT_POINTER(*hn, ht->next);
621 kfree_rcu(ht, rcu);
1da177e4
LT
622 return 0;
623 }
624 }
625
1da177e4
LT
626 return -ENOENT;
627}
628
1e052be6
CW
629static bool ht_empty(struct tc_u_hnode *ht)
630{
631 unsigned int h;
632
633 for (h = 0; h <= ht->divisor; h++)
634 if (rcu_access_pointer(ht->ht[h]))
635 return false;
636
637 return true;
638}
639
763dbf63 640static void u32_destroy(struct tcf_proto *tp)
1da177e4
LT
641{
642 struct tc_u_common *tp_c = tp->data;
1ce87720 643 struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
1da177e4 644
547b792c 645 WARN_ON(root_ht == NULL);
1da177e4
LT
646
647 if (root_ht && --root_ht->refcnt == 0)
648 u32_destroy_hnode(tp, root_ht);
649
650 if (--tp_c->refcnt == 0) {
651 struct tc_u_hnode *ht;
1da177e4 652
3cd904ec 653 hlist_del(&tp_c->hnode);
1da177e4 654
1ce87720
JF
655 for (ht = rtnl_dereference(tp_c->hlist);
656 ht;
657 ht = rtnl_dereference(ht->next)) {
e56cfad1 658 ht->refcnt--;
a0efb80c 659 u32_clear_hnode(tp, ht);
e56cfad1 660 }
1da177e4 661
1ce87720
JF
662 while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
663 RCU_INIT_POINTER(tp_c->hlist, ht->next);
664 kfree_rcu(ht, rcu);
3ff50b79 665 }
1da177e4 666
e7614370 667 idr_destroy(&tp_c->handle_idr);
1da177e4
LT
668 kfree(tp_c);
669 }
670
671 tp->data = NULL;
672}

static int u32_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct tc_u_hnode *ht = arg;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
	struct tc_u_common *tp_c = tp->data;
	int ret = 0;

	if (ht == NULL)
		goto out;

	if (TC_U32_KEY(ht->handle)) {
		u32_remove_hw_knode(tp, ht->handle);
		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
		goto out;
	}

	if (root_ht == ht)
		return -EINVAL;

	if (ht->refcnt == 1) {
		ht->refcnt--;
		u32_destroy_hnode(tp, ht);
	} else {
		return -EBUSY;
	}

out:
	*last = true;
	if (root_ht) {
		if (root_ht->refcnt > 1) {
			*last = false;
			goto ret;
		}
		if (root_ht->refcnt == 1) {
			if (!ht_empty(root_ht)) {
				*last = false;
				goto ret;
			}
		}
	}

	if (tp_c->refcnt > 1) {
		*last = false;
		goto ret;
	}

	if (tp_c->refcnt == 1) {
		struct tc_u_hnode *ht;

		for (ht = rtnl_dereference(tp_c->hlist);
		     ht;
		     ht = rtnl_dereference(ht->next))
			if (!ht_empty(ht)) {
				*last = false;
				break;
			}
	}

ret:
	return ret;
}

static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
	unsigned long idr_index;
	u32 start = htid | 0x800;
	u32 max = htid | 0xFFF;
	u32 min = htid;

	if (idr_alloc_ext(&ht->handle_idr, NULL, &idr_index,
			  start, max + 1, GFP_KERNEL)) {
		if (idr_alloc_ext(&ht->handle_idr, NULL, &idr_index,
				  min + 1, max + 1, GFP_KERNEL))
			return max;
	}

	return (u32)idr_index;
}
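/* Note (editor's note, not part of the upstream file): gen_new_kid() first
 * tries to reserve an automatic node id in htid|0x800 .. htid|0xFFF and
 * only then falls back to htid+1 .. htid|0xFFF, returning htid|0xFFF if
 * the table is exhausted.  For example, with htid 0x80100000 the first
 * auto-assigned key handle would be 0x80100800.
 */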

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
};

static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base, struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, bool ovr)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle))
			return -EINVAL;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				return -EINVAL;
			ht_down->refcnt++;
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		int ret;

		ret = tcf_change_indev(net, tb[TCA_U32_INDEV]);
		if (ret < 0)
			return -EINVAL;
		n->ifindex = ret;
	}
#endif
	return 0;
}

static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced; if this is not
	 * the case then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	idr_replace_ext(&ht->handle_idr, n, n->handle);
	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}

static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_knode *new;
	struct tc_u32_sel *s = &n->sel;

	new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
		      GFP_KERNEL);

	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

#ifdef CONFIG_NET_CLS_IND
	new->ifindex = n->ifindex;
#endif
	new->fshift = n->fshift;
	new->res = n->res;
	new->flags = n->flags;
	RCU_INIT_POINTER(new->ht_down, n->ht_down);

	/* bump reference count as long as we hold pointer to structure */
	if (new->ht_down)
		new->ht_down->refcnt++;

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them intact. When the node is later destroyed
	 * a special destroy call must be made to not free the pf memory.
	 */
	new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
	new->val = n->val;
	new->mask = n->mask;
	/* Similarly success statistics must be moved as pointers */
	new->pcpu_success = n->pcpu_success;
#endif
	new->tp = tp;
	memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));

	if (tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE)) {
		kfree(new);
		return NULL;
	}

	return new;
}

static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca, void **arg, bool ovr)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid, flags = 0;
	int err;
#ifdef CONFIG_CLS_U32_PERF
	size_t size;
#endif

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_U32_FLAGS]) {
		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	n = *arg;
	if (n) {
		struct tc_u_knode *new;

		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		if (n->flags != flags)
			return -EINVAL;

		new = u32_init_knode(tp, n);
		if (!new)
			return -ENOMEM;

		err = u32_set_parms(net, tp, base,
				    rtnl_dereference(n->ht_up), new, tb,
				    tca[TCA_RATE], ovr);

		if (err) {
			u32_destroy_key(tp, new, false);
			return err;
		}

		err = u32_replace_hw_knode(tp, new, flags);
		if (err) {
			u32_destroy_key(tp, new, false);
			return err;
		}

		if (!tc_in_hw(new->flags))
			new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		u32_replace_knode(tp, tp_c, new);
		tcf_unbind_filter(tp, &n->res);
		tcf_exts_get_net(&n->exts);
		call_rcu(&n->rcu, u32_delete_key_rcu);
		return 0;
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		if (handle == 0) {
			handle = gen_new_htid(tp->data, ht);
			if (handle == 0) {
				kfree(ht);
				return -ENOMEM;
			}
		} else {
			err = idr_alloc_ext(&tp_c->handle_idr, ht, NULL,
					    handle, handle + 1, GFP_KERNEL);
			if (err) {
				kfree(ht);
				return err;
			}
		}
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		idr_init(&ht->handle_idr);

		err = u32_replace_hw_hnode(tp, ht, flags);
		if (err) {
			idr_remove_ext(&tp_c->handle_idr, handle);
			kfree(ht);
			return err;
		}

		RCU_INIT_POINTER(ht->next, tp_c->hlist);
		rcu_assign_pointer(tp_c->hlist, ht);
		*arg = ht;

		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = rtnl_dereference(tp->root);
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = rtnl_dereference(tp->root);
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
		err = idr_alloc_ext(&ht->handle_idr, NULL, NULL,
				    handle, handle + 1,
				    GFP_KERNEL);
		if (err)
			return err;
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL) {
		err = -EINVAL;
		goto erridr;
	}

	s = nla_data(tb[TCA_U32_SEL]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL) {
		err = -ENOBUFS;
		goto erridr;
	}

#ifdef CONFIG_CLS_U32_PERF
	size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
	n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
	if (!n->pf) {
		err = -ENOBUFS;
		goto errfree;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	RCU_INIT_POINTER(n->ht_up, ht);
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	n->flags = flags;
	n->tp = tp;

	err = tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
	if (err < 0)
		goto errout;

#ifdef CONFIG_CLS_U32_MARK
	n->pcpu_success = alloc_percpu(u32);
	if (!n->pcpu_success) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		n->val = mark->val;
		n->mask = mark->mask;
	}
#endif

	err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		err = u32_replace_hw_knode(tp, n, flags);
		if (err)
			goto errhw;

		if (!tc_in_hw(n->flags))
			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		ins = &ht->ht[TC_U32_HASH(handle)];
		for (pins = rtnl_dereference(*ins); pins;
		     ins = &pins->next, pins = rtnl_dereference(*ins))
			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
				break;

		RCU_INIT_POINTER(n->next, pins);
		rcu_assign_pointer(*ins, n);
		*arg = n;
		return 0;
	}

errhw:
#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
#endif

errout:
	tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
errfree:
	free_percpu(n->pf);
#endif
	kfree(n);
erridr:
	idr_remove_ext(&ht->handle_idr, handle);
	return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

static void u32_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct tc_u_knode *n = fh;

	if (n && n->res.classid == classid)
		n->res.class = cl;
}
1173
8113c095 1174static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
5a7a5555 1175 struct sk_buff *skb, struct tcmsg *t)
1da177e4 1176{
8113c095 1177 struct tc_u_knode *n = fh;
1ce87720 1178 struct tc_u_hnode *ht_up, *ht_down;
4b3550ef 1179 struct nlattr *nest;
1da177e4
LT
1180
1181 if (n == NULL)
1182 return skb->len;
1183
1184 t->tcm_handle = n->handle;
1185
4b3550ef
PM
1186 nest = nla_nest_start(skb, TCA_OPTIONS);
1187 if (nest == NULL)
1188 goto nla_put_failure;
1da177e4
LT
1189
1190 if (TC_U32_KEY(n->handle) == 0) {
8113c095 1191 struct tc_u_hnode *ht = fh;
cc7ec456
ED
1192 u32 divisor = ht->divisor + 1;
1193
1b34ec43
DM
1194 if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
1195 goto nla_put_failure;
1da177e4 1196 } else {
459d5f62
JF
1197#ifdef CONFIG_CLS_U32_PERF
1198 struct tc_u32_pcnt *gpf;
459d5f62 1199 int cpu;
80aab73d 1200#endif
459d5f62 1201
1b34ec43
DM
1202 if (nla_put(skb, TCA_U32_SEL,
1203 sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
1204 &n->sel))
1205 goto nla_put_failure;
1ce87720
JF
1206
1207 ht_up = rtnl_dereference(n->ht_up);
1208 if (ht_up) {
1da177e4 1209 u32 htid = n->handle & 0xFFFFF000;
1b34ec43
DM
1210 if (nla_put_u32(skb, TCA_U32_HASH, htid))
1211 goto nla_put_failure;
1da177e4 1212 }
1b34ec43
DM
1213 if (n->res.classid &&
1214 nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
1215 goto nla_put_failure;
1ce87720
JF
1216
1217 ht_down = rtnl_dereference(n->ht_down);
1218 if (ht_down &&
1219 nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
1b34ec43 1220 goto nla_put_failure;
1da177e4 1221
9e8ce79c
JF
1222 if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
1223 goto nla_put_failure;
1224
1da177e4 1225#ifdef CONFIG_CLS_U32_MARK
459d5f62
JF
1226 if ((n->val || n->mask)) {
1227 struct tc_u32_mark mark = {.val = n->val,
1228 .mask = n->mask,
1229 .success = 0};
80aab73d 1230 int cpum;
459d5f62 1231
80aab73d
JF
1232 for_each_possible_cpu(cpum) {
1233 __u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);
459d5f62
JF
1234
1235 mark.success += cnt;
1236 }
1237
1238 if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
1239 goto nla_put_failure;
1240 }
1da177e4
LT
1241#endif
1242
5da57f42 1243 if (tcf_exts_dump(skb, &n->exts) < 0)
add93b61 1244 goto nla_put_failure;
1da177e4
LT
1245
1246#ifdef CONFIG_NET_CLS_IND
2519a602
WC
1247 if (n->ifindex) {
1248 struct net_device *dev;
1249 dev = __dev_get_by_index(net, n->ifindex);
1250 if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
1251 goto nla_put_failure;
1252 }
1da177e4
LT
1253#endif
1254#ifdef CONFIG_CLS_U32_PERF
459d5f62
JF
1255 gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
1256 n->sel.nkeys * sizeof(u64),
1257 GFP_KERNEL);
1258 if (!gpf)
1259 goto nla_put_failure;
1260
1261 for_each_possible_cpu(cpu) {
1262 int i;
1263 struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);
1264
1265 gpf->rcnt += pf->rcnt;
1266 gpf->rhit += pf->rhit;
1267 for (i = 0; i < n->sel.nkeys; i++)
1268 gpf->kcnts[i] += pf->kcnts[i];
1269 }
1270
9854518e
ND
1271 if (nla_put_64bit(skb, TCA_U32_PCNT,
1272 sizeof(struct tc_u32_pcnt) +
1273 n->sel.nkeys * sizeof(u64),
1274 gpf, TCA_U32_PAD)) {
459d5f62 1275 kfree(gpf);
1b34ec43 1276 goto nla_put_failure;
459d5f62
JF
1277 }
1278 kfree(gpf);
1da177e4
LT
1279#endif
1280 }
1281
4b3550ef
PM
1282 nla_nest_end(skb, nest);
1283
1da177e4 1284 if (TC_U32_KEY(n->handle))
5da57f42 1285 if (tcf_exts_dump_stats(skb, &n->exts) < 0)
add93b61 1286 goto nla_put_failure;
1da177e4
LT
1287 return skb->len;
1288
add93b61 1289nla_put_failure:
4b3550ef 1290 nla_nest_cancel(skb, nest);
1da177e4
LT
1291 return -1;
1292}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.dump		=	u32_dump,
	.bind_class	=	u32_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_u32(void)
{
	int i, ret;

	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
					  sizeof(struct hlist_head),
					  GFP_KERNEL);
	if (!tc_u_common_hash)
		return -ENOMEM;

	for (i = 0; i < U32_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&tc_u_common_hash[i]);

	ret = register_tcf_proto_ops(&cls_u32_ops);
	if (ret)
		kvfree(tc_u_common_hash);
	return ret;
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
	kvfree(tc_u_common_hash);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");