/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

struct fl_flow_key {
	int	indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

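/* flower keeps a single mask per classifier instance (see
 * fl_check_assign_mask()): each packet's dissected key is AND-ed with that
 * mask and the result is looked up in an rhashtable. The __aligned() above
 * lets fl_set_masked_key() and the mask comparisons work one long at a
 * time.
 */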
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head rcu;
};

struct cls_fl_head {
	struct rhashtable ht;
	struct fl_flow_mask mask;
	struct flow_dissector dissector;
	u32 hgen;
	bool mask_assigned;
	struct list_head filters;
	struct rhashtable_params ht_params;
	union {
		struct work_struct work;
		struct rcu_head	rcu;
	};
};

struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	u32 handle;
	u32 flags;
	struct rcu_head	rcu;
	struct tc_to_netdev tc;
	struct net_device *hw_dev;
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

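/* Compute the smallest long-aligned byte range [start, end) covering every
 * nonzero byte of the mask; masking, hashing and comparisons then only
 * need to touch this range of the key.
 */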
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

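/* Fast path: dissect the skb (plus tunnel metadata, if present) into a
 * flow key, mask it with the instance-wide mask and look the result up in
 * the hash table. A miss, or a hit on a filter installed with skip_sw,
 * yields -1 (no match).
 */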
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;
	struct ip_tunnel_info *info;

	if (!atomic_read(&head->ht.nelems))
		return -1;

	fl_clear_masked_range(&skb_key, &head->mask);

	info = skb_tunnel_info(skb);
	if (info) {
		struct ip_tunnel_key *key = &info->key;

		switch (ip_tunnel_info_af(info)) {
		case AF_INET:
			skb_key.enc_control.addr_type =
				FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			skb_key.enc_ipv4.src = key->u.ipv4.src;
			skb_key.enc_ipv4.dst = key->u.ipv4.dst;
			break;
		case AF_INET6:
			skb_key.enc_control.addr_type =
				FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			skb_key.enc_ipv6.src = key->u.ipv6.src;
			skb_key.enc_ipv6.dst = key->u.ipv6.dst;
			break;
		}

		skb_key.enc_key_id.keyid = tunnel_id_to_key32(key->tun_id);
		skb_key.enc_tp.src = key->tp_src;
		skb_key.enc_tp.dst = key->tp_dst;
	}

	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case of an unknown
	 * protocol, so set it here instead.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = rhashtable_lookup_fast(&head->ht,
				   fl_key_get_start(&skb_mkey, &head->mask),
				   head->ht_params);
	if (f && !tc_skip_sw(f->flags)) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->filters);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void fl_destroy_filter(struct rcu_head *head)
{
	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct tc_cls_flower_offload offload = {0};
	struct net_device *dev = f->hw_dev;
	struct tc_to_netdev *tc = &f->tc;

	if (!tc_can_offload(dev, tp))
		return;

	offload.command = TC_CLSFLOWER_DESTROY;
	offload.cookie = (unsigned long)f;

	tc->type = TC_SETUP_CLSFLOWER;
	tc->cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
}

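/* Program the filter into hardware. If the classifier's own device cannot
 * offload, fall back to a device referenced by the actions (an egress
 * device found via tcf_exts_get_dev(), typically a mirred target); if no
 * capable device exists, stay software-only and succeed, unless skip_sw
 * demanded hardware, in which case fail with -EINVAL.
 */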
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct flow_dissector *dissector,
				struct fl_flow_key *mask,
				struct cls_fl_filter *f)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev *tc = &f->tc;
	int err;

	if (!tc_can_offload(dev, tp)) {
		if (tcf_exts_get_dev(dev, &f->exts, &f->hw_dev) ||
		    (f->hw_dev && !tc_can_offload(f->hw_dev, tp))) {
			f->hw_dev = dev;
			return tc_skip_sw(f->flags) ? -EINVAL : 0;
		}
		dev = f->hw_dev;
		tc->egress_dev = true;
	} else {
		f->hw_dev = dev;
	}

	offload.command = TC_CLSFLOWER_REPLACE;
	offload.cookie = (unsigned long)f;
	offload.dissector = dissector;
	offload.mask = mask;
	offload.key = &f->mkey;
	offload.exts = &f->exts;

	tc->type = TC_SETUP_CLSFLOWER;
	tc->cls_flower = &offload;

	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
					    tc);

	if (tc_skip_sw(f->flags))
		return err;
	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct tc_cls_flower_offload offload = {0};
	struct net_device *dev = f->hw_dev;
	struct tc_to_netdev *tc = &f->tc;

	if (!tc_can_offload(dev, tp))
		return;

	offload.command = TC_CLSFLOWER_STATS;
	offload.cookie = (unsigned long)f;
	offload.exts = &f->exts;

	tc->type = TC_SETUP_CLSFLOWER;
	tc->cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
}

static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	list_del_rcu(&f->list);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
}

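/* rhashtable_destroy() may sleep, so final teardown is bounced from RCU
 * callback context to a workqueue; the module reference taken in
 * fl_destroy() keeps the module alive until the deferred work has run.
 */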
static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(work, struct cls_fl_head,
						work);
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy_rcu(struct rcu_head *rcu)
{
	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);

	INIT_WORK(&head->work, fl_destroy_sleepable);
	schedule_work(&head->work);
}

static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list)
		__fl_delete(tp, f);

	__module_get(THIS_MODULE);
	call_rcu(&head->rcu, fl_destroy_rcu);

	return true;
}

static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long) f;
	return 0;
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
};

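/* Copy one netlink attribute into the key and its mask. An absent mask
 * attribute (or mask_type == TCA_FLOWER_UNSPEC) means exact match, i.e.
 * an all-ones mask.
 */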
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}

static void fl_set_key_vlan(struct nlattr **tb,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
		key_val->vlan_id =
			nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
		key_val->vlan_priority =
			nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb,
			    u32 *flags_key, u32 *flags_mask)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
		return -EINVAL;

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);

	return 0;
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
	__be16 ethertype;
	int ret = 0;
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (ethertype == htons(ETH_P_8021Q)) {
			fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
			fl_set_key_val(tb, &key->basic.n_proto,
				       TCA_FLOWER_KEY_VLAN_ETH_TYPE,
				       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
				       sizeof(key->basic.n_proto));
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	if (tb[TCA_FLOWER_KEY_FLAGS])
		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);

	return ret;
}

static bool fl_mask_eq(struct fl_flow_mask *mask1,
		       struct fl_flow_mask *mask2)
{
	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

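/* Hash only the masked byte range of ->mkey: key_offset is advanced to
 * range.start and key_len trimmed to the range length, so bytes outside
 * the mask never influence hashing or lookup.
 */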
static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}

#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))

#define FL_KEY_IS_MASKED(mask, member)					\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),	\
		   0, FL_KEY_MEMBER_SIZE(member))			\

#define FL_KEY_SET(keys, cnt, id, member)				\
	do {								\
		keys[cnt].key_id = id;					\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);	\
		cnt++;							\
	} while(0);

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)		\
	do {								\
		if (FL_KEY_IS_MASKED(mask, member))			\
			FL_KEY_SET(keys, cnt, id, member);		\
	} while(0);

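/* Register with the flow dissector only the key fields the mask actually
 * uses (CONTROL and BASIC are always needed), so skb_flow_dissect() does
 * not parse headers no filter matches on.
 */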
static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	if (FL_KEY_IS_MASKED(&mask->key, enc_ipv4) ||
	    FL_KEY_IS_MASKED(&mask->key, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}

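/* All filters of one classifier instance share a single mask. The first
 * filter's mask is adopted and sizes the hashtable; a later filter with a
 * different mask is rejected with -EINVAL.
 */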
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct fl_flow_mask *mask)
{
	int err;

	if (head->mask_assigned) {
		if (!fl_mask_eq(&head->mask, mask))
			return -EINVAL;
		else
			return 0;
	}

	/* No mask has been assigned yet, so adopt this one and initialize
	 * the hashtable according to it.
	 */
	err = fl_init_hashtable(head, mask);
	if (err)
		return err;
	memcpy(&head->mask, mask, sizeof(head->mask));
	head->mask_assigned = true;

	fl_init_dissector(head, mask);

	return 0;
}

static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	err = tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}

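/* Linearly probe for an unused handle in [1, 0x7FFFFFFE], continuing from
 * the last value in head->hgen; give up and return 0 after 2^31 attempts.
 */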
static u32 fl_grab_new_handle(struct tcf_proto *tp,
			      struct cls_fl_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && fl_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

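/* Create or replace a filter: parse and validate the attributes, insert
 * the masked key into the hash table (unless skip_sw) and program the
 * hardware (unless skip_hw), then unlink any replaced filter and free it
 * after an RCU grace period.
 */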
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
	struct cls_fl_filter *fnew;
	struct nlattr *tb[TCA_FLOWER_MAX + 1];
	struct fl_flow_mask mask = {};
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
	if (err < 0)
		return err;

	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (!handle) {
		handle = fl_grab_new_handle(tp, head);
		if (!handle) {
			err = -EINVAL;
			goto errout;
		}
	}
	fnew->handle = handle;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout;

	if (!tc_skip_sw(fnew->flags)) {
		err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
					     head->ht_params);
		if (err)
			goto errout;
	}

	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp,
					   &head->dissector,
					   &mask.key,
					   fnew);
		if (err)
			goto errout;
	}

	if (fold) {
		if (!tc_skip_sw(fold->flags))
			rhashtable_remove_fast(&head->ht, &fold->ht_node,
					       head->ht_params);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold);
	}

	*arg = (unsigned long) fnew;

	if (fold) {
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	return 0;

errout:
	tcf_exts_destroy(&fnew->exts);
	kfree(fnew);
	return err;
}

static int fl_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;

	if (!tc_skip_sw(f->flags))
		rhashtable_remove_fast(&head->ht, &f->ht_node,
				       head->ht_params);
	__fl_delete(tp, f);
	return 0;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

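/* Dump one key/mask pair; a field whose mask is all zeroes was never set
 * and is omitted from the dump.
 */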
static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_vlan(struct sk_buff *skb,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}

static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
			    u32 *flower_key, u32 *flower_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (dissector_mask & dissector_flag_bit) {
		*flower_mask |= flower_flag_bit;
		if (dissector_key & dissector_flag_bit)
			*flower_key |= flower_flag_bit;
	}
}

static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
{
	u32 key, mask;
	__be32 _key, _mask;
	int err;

	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
		return 0;

	key = 0;
	mask = 0;

	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);

	_key = cpu_to_be32(key);
	_mask = cpu_to_be32(mask);

	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
	if (err)
		return err;

	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
}

static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (!tc_skip_hw(f->flags))
		fl_hw_update_stats(tp, f);

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			     sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");
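
/*
 * Example usage with the iproute2 tc(8) front end (a sketch; the exact
 * flower option spelling may vary between iproute2 versions):
 *
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 parent ffff: protocol ip flower \
 *		ip_proto tcp dst_port 80 skip_hw action drop
 *
 * Because all filters of one classifier instance share a single mask (see
 * fl_check_assign_mask()), a second filter added to the same instance must
 * match on the same set of fields.
 */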