/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

struct fl_flow_key {
	int indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head rcu;
};

struct cls_fl_head {
	struct rhashtable ht;
	struct fl_flow_mask mask;
	struct flow_dissector dissector;
	u32 hgen;
	bool mask_assigned;
	struct list_head filters;
	struct rhashtable_params ht_params;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	u32 handle;
	u32 flags;
	struct rcu_head rcu;
	struct tc_to_netdev tc;
	struct net_device *hw_dev;
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

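/* Find the byte range of the mask that is actually non-zero and round it out
 * to long boundaries, so that masking, comparison and hashing can walk the
 * key one long at a time (fl_flow_key is __aligned accordingly).
 */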
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

static struct cls_fl_filter *fl_lookup(struct cls_fl_head *head,
				       struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&head->ht,
				      fl_key_get_start(mkey, &head->mask),
				      head->ht_params);
}

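/* Fast path: build a key from the skb (tunnel metadata, ingress ifindex and
 * the flow dissector output), AND it with the instance mask and look the
 * result up in the rhashtable.  Filters marked skip_sw are never matched
 * here in software.
 */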
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;
	struct ip_tunnel_info *info;

	if (!atomic_read(&head->ht.nelems))
		return -1;

	fl_clear_masked_range(&skb_key, &head->mask);

	info = skb_tunnel_info(skb);
	if (info) {
		struct ip_tunnel_key *key = &info->key;

		switch (ip_tunnel_info_af(info)) {
		case AF_INET:
			skb_key.enc_control.addr_type =
				FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			skb_key.enc_ipv4.src = key->u.ipv4.src;
			skb_key.enc_ipv4.dst = key->u.ipv4.dst;
			break;
		case AF_INET6:
			skb_key.enc_control.addr_type =
				FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			skb_key.enc_ipv6.src = key->u.ipv6.src;
			skb_key.enc_ipv6.dst = key->u.ipv6.dst;
			break;
		}

		skb_key.enc_key_id.keyid = tunnel_id_to_key32(key->tun_id);
		skb_key.enc_tp.src = key->tp_src;
		skb_key.enc_tp.dst = key->tp_dst;
	}

	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case of an unknown
	 * protocol, so do it here.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = fl_lookup(head, &skb_mkey);
	if (f && !tc_skip_sw(f->flags)) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->filters);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void fl_destroy_filter(struct rcu_head *head)
{
	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}

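/* Hardware offload helpers: each one fills a struct tc_cls_flower_offload
 * command (REPLACE, DESTROY or STATS) and hands it to the driver through
 * ndo_setup_tc() with type TC_SETUP_CLSFLOWER, skipping devices that cannot
 * offload this classifier.
 */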
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct tc_cls_flower_offload offload = {0};
	struct net_device *dev = f->hw_dev;
	struct tc_to_netdev *tc = &f->tc;

	if (!tc_can_offload(dev, tp))
		return;

	offload.command = TC_CLSFLOWER_DESTROY;
	offload.cookie = (unsigned long)f;

	tc->type = TC_SETUP_CLSFLOWER;
	tc->cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct flow_dissector *dissector,
				struct fl_flow_key *mask,
				struct cls_fl_filter *f)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev *tc = &f->tc;
	int err;

	if (!tc_can_offload(dev, tp)) {
		if (tcf_exts_get_dev(dev, &f->exts, &f->hw_dev) ||
		    (f->hw_dev && !tc_can_offload(f->hw_dev, tp))) {
			f->hw_dev = dev;
			return tc_skip_sw(f->flags) ? -EINVAL : 0;
		}
		dev = f->hw_dev;
		tc->egress_dev = true;
	} else {
		f->hw_dev = dev;
	}

	offload.command = TC_CLSFLOWER_REPLACE;
	offload.cookie = (unsigned long)f;
	offload.dissector = dissector;
	offload.mask = mask;
	offload.key = &f->mkey;
	offload.exts = &f->exts;

	tc->type = TC_SETUP_CLSFLOWER;
	tc->cls_flower = &offload;

	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
					    tc);

	if (tc_skip_sw(f->flags))
		return err;
	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct tc_cls_flower_offload offload = {0};
	struct net_device *dev = f->hw_dev;
	struct tc_to_netdev *tc = &f->tc;

	if (!tc_can_offload(dev, tp))
		return;

	offload.command = TC_CLSFLOWER_STATS;
	offload.cookie = (unsigned long)f;
	offload.exts = &f->exts;

	tc->type = TC_SETUP_CLSFLOWER;
	tc->cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
}

static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	list_del_rcu(&f->list);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(work, struct cls_fl_head,
						work);
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy_rcu(struct rcu_head *rcu)
{
	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);

	INIT_WORK(&head->work, fl_destroy_sleepable);
	schedule_work(&head->work);
}

static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list)
		__fl_delete(tp, f);

	__module_get(THIS_MODULE);
	call_rcu(&head->rcu, fl_destroy_rcu);

	return true;
}

static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long) f;
	return 0;
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC] = { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID] = { .type = NLA_U32 },
	[TCA_FLOWER_INDEV] = { .type = NLA_STRING,
			       .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
};

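/* Copy one netlink attribute into the key and its mask counterpart.  An
 * absent value attribute leaves the field unset; an absent (or undefined)
 * mask attribute means "match this field exactly", i.e. the mask is set to
 * all ones.
 */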
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}

static void fl_set_key_vlan(struct nlattr **tb,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK 0x7

	if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
		key_val->vlan_id =
			nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
		key_val->vlan_priority =
			nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb,
			    u32 *flags_key, u32 *flags_mask)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
		return -EINVAL;

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);

	return 0;
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
	__be16 ethertype;
	int ret = 0;
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (ethertype == htons(ETH_P_8021Q)) {
			fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
			fl_set_key_val(tb, &key->basic.n_proto,
				       TCA_FLOWER_KEY_VLAN_ETH_TYPE,
				       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
				       sizeof(key->basic.n_proto));
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	if (tb[TCA_FLOWER_KEY_FLAGS])
		ret = fl_set_key_flags(tb, &key->control.flags,
				       &mask->control.flags);

	return ret;
}

static bool fl_mask_eq(struct fl_flow_mask *mask1,
		       struct fl_flow_mask *mask2)
{
	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

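/* The hashtable key is not the whole fl_flow_key but only the byte range
 * covered by the (single) mask: key_offset is shifted by range.start and
 * key_len is the rounded range length computed by fl_mask_update_range().
 */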
static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}

#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))

#define FL_KEY_IS_MASKED(mask, member)					\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),	\
		   0, FL_KEY_MEMBER_SIZE(member))

#define FL_KEY_SET(keys, cnt, id, member)				\
	do {								\
		keys[cnt].key_id = id;					\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);	\
		cnt++;							\
	} while(0);

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)		\
	do {								\
		if (FL_KEY_IS_MASKED(mask, member))			\
			FL_KEY_SET(keys, cnt, id, member);		\
	} while(0);

static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	if (FL_KEY_IS_MASKED(&mask->key, enc_ipv4) ||
	    FL_KEY_IS_MASKED(&mask->key, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}

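/* A flower instance currently supports a single mask: the first filter that
 * is added fixes head->mask, the hashtable layout and the dissector.  Any
 * later filter whose mask differs is rejected with -EINVAL.
 */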
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct fl_flow_mask *mask)
{
	int err;

	if (head->mask_assigned) {
		if (!fl_mask_eq(&head->mask, mask))
			return -EINVAL;
		else
			return 0;
	}

	/* Mask is not assigned yet. So assign it and init hashtable
	 * according to that.
	 */
	err = fl_init_hashtable(head, mask);
	if (err)
		return err;
	memcpy(&head->mask, mask, sizeof(head->mask));
	head->mask_assigned = true;

	fl_init_dissector(head, mask);

	return 0;
}

static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	err = tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}

static u32 fl_grab_new_handle(struct tcf_proto *tp,
			      struct cls_fl_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && fl_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

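/* Create or replace a filter: parse the netlink options, allocate the new
 * filter, assign (or verify) the shared mask, insert it into the software
 * hashtable unless tc_skip_sw(), offload it to hardware unless tc_skip_hw(),
 * and finally swap out any replaced filter under RCU.
 */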
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
	struct cls_fl_filter *fnew;
	struct nlattr *tb[TCA_FLOWER_MAX + 1];
	struct fl_flow_mask mask = {};
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
	if (err < 0)
		return err;

	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (!handle) {
		handle = fl_grab_new_handle(tp, head);
		if (!handle) {
			err = -EINVAL;
			goto errout;
		}
	}
	fnew->handle = handle;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout;

	if (!tc_skip_sw(fnew->flags)) {
		if (!fold && fl_lookup(head, &fnew->mkey)) {
			err = -EEXIST;
			goto errout;
		}

		err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
					     head->ht_params);
		if (err)
			goto errout;
	}

	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp,
					   &head->dissector,
					   &mask.key,
					   fnew);
		if (err)
			goto errout;
	}

	if (fold) {
		if (!tc_skip_sw(fold->flags))
			rhashtable_remove_fast(&head->ht, &fold->ht_node,
					       head->ht_params);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold);
	}

	*arg = (unsigned long) fnew;

	if (fold) {
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	return 0;

errout:
	tcf_exts_destroy(&fnew->exts);
	kfree(fnew);
	return err;
}

static int fl_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;

	if (!tc_skip_sw(f->flags))
		rhashtable_remove_fast(&head->ht, &f->ht_node,
				       head->ht_params);
	__fl_delete(tp, f);
	return 0;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_vlan(struct sk_buff *skb,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}

static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
			    u32 *flower_key, u32 *flower_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (dissector_mask & dissector_flag_bit) {
		*flower_mask |= flower_flag_bit;
		if (dissector_key & dissector_flag_bit)
			*flower_key |= flower_flag_bit;
	}
}

static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
{
	u32 key, mask;
	__be32 _key, _mask;
	int err;

	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
		return 0;

	key = 0;
	mask = 0;

	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);

	_key = cpu_to_be32(key);
	_mask = cpu_to_be32(mask);

	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
	if (err)
		return err;

	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
}

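/* Dump a filter back to user space.  Only fields whose mask is non-zero are
 * emitted, hardware stats are refreshed first unless the filter is skip_hw,
 * and the classifier flags are dumped only when they are actually set.
 */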
static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (!tc_skip_hw(f->flags))
		fl_hw_update_stats(tp, f);

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			     sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC,
				  &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");