/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

struct fl_flow_key {
	int indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head rcu;
};

struct cls_fl_head {
	struct rhashtable ht;
	struct fl_flow_mask mask;
	struct flow_dissector dissector;
	u32 hgen;
	bool mask_assigned;
	struct list_head filters;
	struct rhashtable_params ht_params;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	u32 handle;
	u32 flags;
	struct rcu_head rcu;
	struct tc_to_netdev tc;
	struct net_device *hw_dev;
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

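/* Find the first and last non-zero bytes of the mask and round them
 * out to long boundaries, so that masking and comparisons can be done
 * long-at-a-time over just the interesting part of the key.
 */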
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

static struct cls_fl_filter *fl_lookup(struct cls_fl_head *head,
				       struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&head->ht,
				      fl_key_get_start(mkey, &head->mask),
				      head->ht_params);
}

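/* Classification fast path: build a flow key from the skb (including
 * any tunnel metadata), AND it with the shared mask and look the
 * result up in the hash table.
 */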
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;
	struct ip_tunnel_info *info;

	if (!atomic_read(&head->ht.nelems))
		return -1;

	fl_clear_masked_range(&skb_key, &head->mask);

	info = skb_tunnel_info(skb);
	if (info) {
		struct ip_tunnel_key *key = &info->key;

		switch (ip_tunnel_info_af(info)) {
		case AF_INET:
			skb_key.enc_control.addr_type =
				FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			skb_key.enc_ipv4.src = key->u.ipv4.src;
			skb_key.enc_ipv4.dst = key->u.ipv4.dst;
			break;
		case AF_INET6:
			skb_key.enc_control.addr_type =
				FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			skb_key.enc_ipv6.src = key->u.ipv6.src;
			skb_key.enc_ipv6.dst = key->u.ipv6.dst;
			break;
		}

		skb_key.enc_key_id.keyid = tunnel_id_to_key32(key->tun_id);
		skb_key.enc_tp.src = key->tp_src;
		skb_key.enc_tp.dst = key->tp_dst;
	}

	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case of an unknown
	 * protocol, so set it here.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = fl_lookup(head, &skb_mkey);
	if (f && !tc_skip_sw(f->flags)) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->filters);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void fl_destroy_filter(struct rcu_head *head)
{
	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct tc_cls_flower_offload offload = {0};
	struct net_device *dev = f->hw_dev;
	struct tc_to_netdev *tc = &f->tc;

	if (!tc_can_offload(dev, tp))
		return;

	offload.command = TC_CLSFLOWER_DESTROY;
	offload.cookie = (unsigned long)f;

	tc->type = TC_SETUP_CLSFLOWER;
	tc->cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
}

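/* Offer the filter to hardware. If the qdisc's device cannot offload,
 * fall back to the egress device referenced by the actions, if any;
 * failure to offload is only fatal when software processing is
 * explicitly skipped (skip_sw).
 */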
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct flow_dissector *dissector,
				struct fl_flow_key *mask,
				struct cls_fl_filter *f)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev *tc = &f->tc;
	int err;

	if (!tc_can_offload(dev, tp)) {
		if (tcf_exts_get_dev(dev, &f->exts, &f->hw_dev) ||
		    (f->hw_dev && !tc_can_offload(f->hw_dev, tp))) {
			f->hw_dev = dev;
			return tc_skip_sw(f->flags) ? -EINVAL : 0;
		}
		dev = f->hw_dev;
		tc->egress_dev = true;
	} else {
		f->hw_dev = dev;
	}

	offload.command = TC_CLSFLOWER_REPLACE;
	offload.cookie = (unsigned long)f;
	offload.dissector = dissector;
	offload.mask = mask;
	offload.key = &f->mkey;
	offload.exts = &f->exts;

	tc->type = TC_SETUP_CLSFLOWER;
	tc->cls_flower = &offload;

	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
					    tc);
	if (!err)
		f->flags |= TCA_CLS_FLAGS_IN_HW;

	if (tc_skip_sw(f->flags))
		return err;
	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct tc_cls_flower_offload offload = {0};
	struct net_device *dev = f->hw_dev;
	struct tc_to_netdev *tc = &f->tc;

	if (!tc_can_offload(dev, tp))
		return;

	offload.command = TC_CLSFLOWER_STATS;
	offload.cookie = (unsigned long)f;
	offload.exts = &f->exts;

	tc->type = TC_SETUP_CLSFLOWER;
	tc->cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
}

static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	list_del_rcu(&f->list);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
}

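/* rhashtable_destroy() may sleep, so the head is freed from a
 * workqueue scheduled once the RCU grace period has passed. The
 * module reference taken in fl_destroy() keeps this code alive until
 * the work has run.
 */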
static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(work, struct cls_fl_head,
						work);
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy_rcu(struct rcu_head *rcu)
{
	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);

	INIT_WORK(&head->work, fl_destroy_sleepable);
	schedule_work(&head->work);
}

static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list)
		__fl_delete(tp, f);

	__module_get(THIS_MODULE);
	call_rcu(&head->rcu, fl_destroy_rcu);

	return true;
}

static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long) f;
	return 0;
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
};

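/* Copy a value attribute into the key and the corresponding mask
 * attribute into the mask. A missing mask attribute means an exact
 * match (all-ones mask).
 */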
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}

static void fl_set_key_vlan(struct nlattr **tb,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
		key_val->vlan_id =
			nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
		key_val->vlan_priority =
			nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb,
			    u32 *flags_key, u32 *flags_mask)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
		return -EINVAL;

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);

	return 0;
}

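/* Parse the netlink attributes into a key/mask pair. Dependent fields
 * such as L4 ports are only filled in when the governing field
 * (e.g. ip_proto) selects them.
 */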
static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
	__be16 ethertype;
	int ret = 0;
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (ethertype == htons(ETH_P_8021Q)) {
			fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
			fl_set_key_val(tb, &key->basic.n_proto,
				       TCA_FLOWER_KEY_VLAN_ETH_TYPE,
				       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
				       sizeof(key->basic.n_proto));
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	if (tb[TCA_FLOWER_KEY_FLAGS])
		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);

	return ret;
}

static bool fl_mask_eq(struct fl_flow_mask *mask1,
		       struct fl_flow_mask *mask2)
{
	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

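/* The hash table keys on the masked portion of the flow key only:
 * key_offset and key_len are narrowed to the byte range that the
 * mask marked as relevant.
 */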
static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}

#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))

#define FL_KEY_IS_MASKED(mask, member)					\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),	\
		   0, FL_KEY_MEMBER_SIZE(member))

#define FL_KEY_SET(keys, cnt, id, member)				\
	do {								\
		keys[cnt].key_id = id;					\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);	\
		cnt++;							\
	} while(0);

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)		\
	do {								\
		if (FL_KEY_IS_MASKED(mask, member))			\
			FL_KEY_SET(keys, cnt, id, member);		\
	} while(0);

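/* Build the flow dissector used on the fast path, enabling only the
 * keys the mask actually matches on (control and basic are always
 * present).
 */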
static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	if (FL_KEY_IS_MASKED(&mask->key, enc_ipv4) ||
	    FL_KEY_IS_MASKED(&mask->key, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}

static int fl_check_assign_mask(struct cls_fl_head *head,
				struct fl_flow_mask *mask)
{
	int err;

	if (head->mask_assigned) {
		if (!fl_mask_eq(&head->mask, mask))
			return -EINVAL;
		else
			return 0;
	}

	/* Mask is not assigned yet, so assign it and initialize the
	 * hashtable accordingly.
	 */
	err = fl_init_hashtable(head, mask);
	if (err)
		return err;
	memcpy(&head->mask, mask, sizeof(head->mask));
	head->mask_assigned = true;

	fl_init_dissector(head, mask);

	return 0;
}

static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	err = tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}

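/* Allocate an unused handle. Handles come from a wrapping 31-bit
 * counter; the search gives up after 0x80000000 attempts.
 */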
static u32 fl_grab_new_handle(struct tcf_proto *tp,
			      struct cls_fl_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && fl_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

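/* Create a new filter or, when *arg names an existing one, replace
 * it. The new filter is fully linked in (software hash table and/or
 * hardware) before the old one is torn down.
 */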
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
	struct cls_fl_filter *fnew;
	struct nlattr *tb[TCA_FLOWER_MAX + 1];
	struct fl_flow_mask mask = {};
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
	if (err < 0)
		return err;

	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (!handle) {
		handle = fl_grab_new_handle(tp, head);
		if (!handle) {
			err = -EINVAL;
			goto errout;
		}
	}
	fnew->handle = handle;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout;

	if (!tc_skip_sw(fnew->flags)) {
		if (!fold && fl_lookup(head, &fnew->mkey)) {
			err = -EEXIST;
			goto errout;
		}

		err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
					     head->ht_params);
		if (err)
			goto errout;
	}

	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp,
					   &head->dissector,
					   &mask.key,
					   fnew);
		if (err)
			goto errout;
	}

	if (!tc_in_hw(fnew->flags))
		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (fold) {
		if (!tc_skip_sw(fold->flags))
			rhashtable_remove_fast(&head->ht, &fold->ht_node,
					       head->ht_params);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold);
	}

	*arg = (unsigned long) fnew;

	if (fold) {
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	return 0;

errout:
	tcf_exts_destroy(&fnew->exts);
	kfree(fnew);
	return err;
}

static int fl_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;

	if (!tc_skip_sw(f->flags))
		rhashtable_remove_fast(&head->ht, &f->ht_node,
				       head->ht_params);
	__fl_delete(tp, f);
	return 0;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

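/* Dump helper: a field is emitted only when its mask is non-zero,
 * i.e. when the filter actually matches on it.
 */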
static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_vlan(struct sk_buff *skb,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}

static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
			    u32 *flower_key, u32 *flower_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (dissector_mask & dissector_flag_bit) {
		*flower_mask |= flower_flag_bit;
		if (dissector_key & dissector_flag_bit)
			*flower_key |= flower_flag_bit;
	}
}

static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
{
	u32 key, mask;
	__be32 _key, _mask;
	int err;

	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
		return 0;

	key = 0;
	mask = 0;

	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);

	_key = cpu_to_be32(key);
	_mask = cpu_to_be32(mask);

	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
	if (err)
		return err;

	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
}

static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (!tc_skip_hw(f->flags))
		fl_hw_update_stats(tp, f);

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			     sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC,
				  &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");