2874c5fd 1// SPDX-License-Identifier: GPL-2.0-or-later
77b9900e
JP
2/*
3 * net/sched/cls_flower.c Flower classifier
4 *
5 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
77b9900e
JP
6 */
7
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/module.h>
11#include <linux/rhashtable.h>
d9363774 12#include <linux/workqueue.h>
06177558 13#include <linux/refcount.h>
77b9900e
JP
14
15#include <linux/if_ether.h>
16#include <linux/in6.h>
17#include <linux/ip.h>
a577d8f7 18#include <linux/mpls.h>
77b9900e
JP
19
20#include <net/sch_generic.h>
21#include <net/pkt_cls.h>
22#include <net/ip.h>
23#include <net/flow_dissector.h>
0a6e7778 24#include <net/geneve.h>
77b9900e 25
bc3103f1
AV
26#include <net/dst.h>
27#include <net/dst_metadata.h>
28
77b9900e
JP
29struct fl_flow_key {
30 int indev_ifindex;
42aecaa9 31 struct flow_dissector_key_control control;
bc3103f1 32 struct flow_dissector_key_control enc_control;
77b9900e
JP
33 struct flow_dissector_key_basic basic;
34 struct flow_dissector_key_eth_addrs eth;
9399ae9a 35 struct flow_dissector_key_vlan vlan;
d64efd09 36 struct flow_dissector_key_vlan cvlan;
77b9900e 37 union {
c3f83241 38 struct flow_dissector_key_ipv4_addrs ipv4;
77b9900e
JP
39 struct flow_dissector_key_ipv6_addrs ipv6;
40 };
41 struct flow_dissector_key_ports tp;
7b684884 42 struct flow_dissector_key_icmp icmp;
99d31326 43 struct flow_dissector_key_arp arp;
bc3103f1
AV
44 struct flow_dissector_key_keyid enc_key_id;
45 union {
46 struct flow_dissector_key_ipv4_addrs enc_ipv4;
47 struct flow_dissector_key_ipv6_addrs enc_ipv6;
48 };
f4d997fd 49 struct flow_dissector_key_ports enc_tp;
a577d8f7 50 struct flow_dissector_key_mpls mpls;
fdfc7dd6 51 struct flow_dissector_key_tcp tcp;
4d80cc0a 52 struct flow_dissector_key_ip ip;
0e2c17b6 53 struct flow_dissector_key_ip enc_ip;
0a6e7778 54 struct flow_dissector_key_enc_opts enc_opts;
5c72299f
AN
55 struct flow_dissector_key_ports tp_min;
56 struct flow_dissector_key_ports tp_max;
77b9900e
JP
57} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
58
59struct fl_flow_mask_range {
60 unsigned short int start;
61 unsigned short int end;
62};
63
64struct fl_flow_mask {
65 struct fl_flow_key key;
66 struct fl_flow_mask_range range;
5c72299f 67 u32 flags;
05cd271f
PB
68 struct rhash_head ht_node;
69 struct rhashtable ht;
70 struct rhashtable_params filter_ht_params;
71 struct flow_dissector dissector;
72 struct list_head filters;
44a5cd43 73 struct rcu_work rwork;
05cd271f 74 struct list_head list;
f48ef4d5 75 refcount_t refcnt;
77b9900e
JP
76};
77
b95ec7eb
JP
78struct fl_flow_tmplt {
79 struct fl_flow_key dummy_key;
80 struct fl_flow_key mask;
81 struct flow_dissector dissector;
82 struct tcf_chain *chain;
83};
84
77b9900e
JP
85struct cls_fl_head {
86 struct rhashtable ht;
259e60f9 87 spinlock_t masks_lock; /* Protect masks list */
05cd271f 88 struct list_head masks;
c049d56e 89 struct list_head hw_filters;
aaa908ff 90 struct rcu_work rwork;
c15ab236 91 struct idr handle_idr;
77b9900e
JP
92};
93
94struct cls_fl_filter {
05cd271f 95 struct fl_flow_mask *mask;
77b9900e
JP
96 struct rhash_head ht_node;
97 struct fl_flow_key mkey;
98 struct tcf_exts exts;
99 struct tcf_result res;
100 struct fl_flow_key key;
101 struct list_head list;
c049d56e 102 struct list_head hw_list;
77b9900e 103 u32 handle;
e69985c6 104 u32 flags;
86c55361 105 u32 in_hw_count;
aaa908ff 106 struct rcu_work rwork;
7091d8c7 107 struct net_device *hw_dev;
06177558
VB
108 /* Flower classifier is unlocked, which means that its reference counter
109 * can be changed concurrently without any kind of external
110 * synchronization. Use atomic reference counter to be concurrency-safe.
111 */
112 refcount_t refcnt;
b2552b8c 113 bool deleted;
77b9900e
JP
114};
115
05cd271f
PB
116static const struct rhashtable_params mask_ht_params = {
117 .key_offset = offsetof(struct fl_flow_mask, key),
118 .key_len = sizeof(struct fl_flow_key),
119 .head_offset = offsetof(struct fl_flow_mask, ht_node),
120 .automatic_shrinking = true,
121};
122
77b9900e
JP
123static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
124{
125 return mask->range.end - mask->range.start;
126}
127
128static void fl_mask_update_range(struct fl_flow_mask *mask)
129{
130 const u8 *bytes = (const u8 *) &mask->key;
131 size_t size = sizeof(mask->key);
05cd271f 132 size_t i, first = 0, last;
77b9900e 133
05cd271f
PB
134 for (i = 0; i < size; i++) {
135 if (bytes[i]) {
136 first = i;
137 break;
138 }
139 }
140 last = first;
141 for (i = size - 1; i != first; i--) {
77b9900e 142 if (bytes[i]) {
77b9900e 143 last = i;
05cd271f 144 break;
77b9900e
JP
145 }
146 }
147 mask->range.start = rounddown(first, sizeof(long));
148 mask->range.end = roundup(last + 1, sizeof(long));
149}
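/* Worked example (illustrative only, not part of the kernel source), assuming
 * a 64-bit build where sizeof(long) == 8: if the first non-zero byte of the
 * mask is at offset 13 and the last at offset 22, then
 *
 *	mask->range.start = rounddown(13, 8) = 8
 *	mask->range.end   = roundup(22 + 1, 8) = 24
 *
 * so fl_set_masked_key() below operates on exactly two long-sized words.
 */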
150
151static void *fl_key_get_start(struct fl_flow_key *key,
152 const struct fl_flow_mask *mask)
153{
154 return (u8 *) key + mask->range.start;
155}
156
157static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
158 struct fl_flow_mask *mask)
159{
160 const long *lkey = fl_key_get_start(key, mask);
161 const long *lmask = fl_key_get_start(&mask->key, mask);
162 long *lmkey = fl_key_get_start(mkey, mask);
163 int i;
164
165 for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
166 *lmkey++ = *lkey++ & *lmask++;
167}
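/* Illustrative sketch (not part of the source): the per-word effect of the
 * loop above, with made-up values and 64-bit longs:
 *
 *	*lkey  = 0x0a0b0c0d11223344
 *	*lmask = 0xffffffff00000000
 *	*lmkey = 0x0a0b0c0d00000000
 *
 * Only bytes covered by the mask survive, so packets that differ solely in
 * unmasked fields produce identical masked keys and hash to the same
 * rhashtable entry.
 */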
168
b95ec7eb
JP
169static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
170 struct fl_flow_mask *mask)
171{
172 const long *lmask = fl_key_get_start(&mask->key, mask);
173 const long *ltmplt;
174 int i;
175
176 if (!tmplt)
177 return true;
178 ltmplt = fl_key_get_start(&tmplt->mask, mask);
179 for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
180 if (~*ltmplt++ & *lmask++)
181 return false;
182 }
183 return true;
184}
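/* Example (illustrative only): a filter mask "fits" a template when every bit
 * it sets is also set in the template mask, i.e. ~tmplt & mask == 0 for each
 * word. Per byte:
 *
 *	template 0xff, filter mask 0x0f -> fits (filter is narrower)
 *	template 0x0f, filter mask 0xff -> rejected (~0x0f & 0xff = 0xf0)
 */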
185
77b9900e
JP
186static void fl_clear_masked_range(struct fl_flow_key *key,
187 struct fl_flow_mask *mask)
188{
189 memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
190}
191
5c72299f
AN
192static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
193 struct fl_flow_key *key,
194 struct fl_flow_key *mkey)
195{
196 __be16 min_mask, max_mask, min_val, max_val;
197
198 min_mask = htons(filter->mask->key.tp_min.dst);
199 max_mask = htons(filter->mask->key.tp_max.dst);
200 min_val = htons(filter->key.tp_min.dst);
201 max_val = htons(filter->key.tp_max.dst);
202
203 if (min_mask && max_mask) {
204 if (htons(key->tp.dst) < min_val ||
205 htons(key->tp.dst) > max_val)
206 return false;
207
208 /* skb does not have min and max values */
209 mkey->tp_min.dst = filter->mkey.tp_min.dst;
210 mkey->tp_max.dst = filter->mkey.tp_max.dst;
211 }
212 return true;
213}
214
215static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
216 struct fl_flow_key *key,
217 struct fl_flow_key *mkey)
218{
219 __be16 min_mask, max_mask, min_val, max_val;
220
221 min_mask = htons(filter->mask->key.tp_min.src);
222 max_mask = htons(filter->mask->key.tp_max.src);
223 min_val = htons(filter->key.tp_min.src);
224 max_val = htons(filter->key.tp_max.src);
225
226 if (min_mask && max_mask) {
227 if (htons(key->tp.src) < min_val ||
228 htons(key->tp.src) > max_val)
229 return false;
230
231 /* skb does not have min and max values */
232 mkey->tp_min.src = filter->mkey.tp_min.src;
233 mkey->tp_max.src = filter->mkey.tp_max.src;
234 }
235 return true;
236}
237
238static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
239 struct fl_flow_key *mkey)
a3308d8f 240{
05cd271f
PB
241 return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
242 mask->filter_ht_params);
a3308d8f
PB
243}
244
5c72299f
AN
245static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
246 struct fl_flow_key *mkey,
247 struct fl_flow_key *key)
248{
249 struct cls_fl_filter *filter, *f;
250
251 list_for_each_entry_rcu(filter, &mask->filters, list) {
252 if (!fl_range_port_dst_cmp(filter, key, mkey))
253 continue;
254
255 if (!fl_range_port_src_cmp(filter, key, mkey))
256 continue;
257
258 f = __fl_lookup(mask, mkey);
259 if (f)
260 return f;
261 }
262 return NULL;
263}
264
265static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
266 struct fl_flow_key *mkey,
267 struct fl_flow_key *key)
268{
269 if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
270 return fl_lookup_range(mask, mkey, key);
271
272 return __fl_lookup(mask, mkey);
273}
274
77b9900e
JP
275static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
276 struct tcf_result *res)
277{
278 struct cls_fl_head *head = rcu_dereference_bh(tp->root);
279 struct cls_fl_filter *f;
05cd271f 280 struct fl_flow_mask *mask;
77b9900e
JP
281 struct fl_flow_key skb_key;
282 struct fl_flow_key skb_mkey;
283
05cd271f
PB
284 list_for_each_entry_rcu(mask, &head->masks, list) {
285 fl_clear_masked_range(&skb_key, mask);
bc3103f1 286
05cd271f
PB
287 skb_key.indev_ifindex = skb->skb_iif;
 288 /* skb_flow_dissect() does not set n_proto in case of an unknown
 289 * protocol, so set it here.
290 */
291 skb_key.basic.n_proto = skb->protocol;
292 skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
293 skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
77b9900e 294
05cd271f 295 fl_set_masked_key(&skb_mkey, &skb_key, mask);
77b9900e 296
5c72299f 297 f = fl_lookup(mask, &skb_mkey, &skb_key);
05cd271f
PB
298 if (f && !tc_skip_sw(f->flags)) {
299 *res = f->res;
300 return tcf_exts_exec(skb, &f->exts, res);
301 }
77b9900e
JP
302 }
303 return -1;
304}
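/* Usage sketch (illustrative, not part of the source; device name and port
 * are placeholders): a typical flower filter created from user space with
 * iproute2, which is what ultimately populates head->masks and the per-mask
 * hash tables walked by fl_classify() above:
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_proto tcp dst_port 80 action drop
 *
 * Every distinct mask used by such filters becomes one fl_flow_mask on
 * head->masks, so classification cost grows with the number of masks, not
 * with the number of filters.
 */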
305
306static int fl_init(struct tcf_proto *tp)
307{
308 struct cls_fl_head *head;
309
310 head = kzalloc(sizeof(*head), GFP_KERNEL);
311 if (!head)
312 return -ENOBUFS;
313
259e60f9 314 spin_lock_init(&head->masks_lock);
05cd271f 315 INIT_LIST_HEAD_RCU(&head->masks);
c049d56e 316 INIT_LIST_HEAD(&head->hw_filters);
77b9900e 317 rcu_assign_pointer(tp->root, head);
c15ab236 318 idr_init(&head->handle_idr);
77b9900e 319
05cd271f
PB
320 return rhashtable_init(&head->ht, &mask_ht_params);
321}
322
44a5cd43
PA
323static void fl_mask_free(struct fl_flow_mask *mask)
324{
f48ef4d5 325 WARN_ON(!list_empty(&mask->filters));
44a5cd43
PA
326 rhashtable_destroy(&mask->ht);
327 kfree(mask);
328}
329
330static void fl_mask_free_work(struct work_struct *work)
331{
332 struct fl_flow_mask *mask = container_of(to_rcu_work(work),
333 struct fl_flow_mask, rwork);
334
335 fl_mask_free(mask);
336}
337
9994677c 338static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
05cd271f 339{
f48ef4d5 340 if (!refcount_dec_and_test(&mask->refcnt))
05cd271f
PB
341 return false;
342
343 rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
259e60f9
VB
344
345 spin_lock(&head->masks_lock);
05cd271f 346 list_del_rcu(&mask->list);
259e60f9
VB
347 spin_unlock(&head->masks_lock);
348
9994677c 349 tcf_queue_work(&mask->rwork, fl_mask_free_work);
05cd271f
PB
350
351 return true;
77b9900e
JP
352}
353
c049d56e
VB
354static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
355{
356 /* Flower classifier only changes root pointer during init and destroy.
357 * Users must obtain reference to tcf_proto instance before calling its
358 * API, so tp->root pointer is protected from concurrent call to
359 * fl_destroy() by reference counting.
360 */
361 return rcu_dereference_raw(tp->root);
362}
363
0dadc117
CW
364static void __fl_destroy_filter(struct cls_fl_filter *f)
365{
366 tcf_exts_destroy(&f->exts);
367 tcf_exts_put_net(&f->exts);
368 kfree(f);
369}
370
0552c8af 371static void fl_destroy_filter_work(struct work_struct *work)
77b9900e 372{
aaa908ff
CW
373 struct cls_fl_filter *f = container_of(to_rcu_work(work),
374 struct cls_fl_filter, rwork);
77b9900e 375
0dadc117 376 __fl_destroy_filter(f);
0552c8af
CW
377}
378
1b0f8037 379static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
c24e43d8 380 bool rtnl_held, struct netlink_ext_ack *extack)
5b33f488 381{
de4784ca 382 struct tc_cls_flower_offload cls_flower = {};
208c0f4b 383 struct tcf_block *block = tp->chain->block;
5b33f488 384
c24e43d8
VB
385 if (!rtnl_held)
386 rtnl_lock();
387
d6787147 388 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
de4784ca
JP
389 cls_flower.command = TC_CLSFLOWER_DESTROY;
390 cls_flower.cookie = (unsigned long) f;
5b33f488 391
aeb3fecd 392 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
3d81e711 393 spin_lock(&tp->lock);
c049d56e 394 list_del_init(&f->hw_list);
caa72601 395 tcf_block_offload_dec(block, &f->flags);
3d81e711 396 spin_unlock(&tp->lock);
c24e43d8
VB
397
398 if (!rtnl_held)
399 rtnl_unlock();
5b33f488
AV
400}
401
e8eb36cd 402static int fl_hw_replace_filter(struct tcf_proto *tp,
c24e43d8 403 struct cls_fl_filter *f, bool rtnl_held,
41002038 404 struct netlink_ext_ack *extack)
5b33f488 405{
c049d56e 406 struct cls_fl_head *head = fl_head_dereference(tp);
de4784ca 407 struct tc_cls_flower_offload cls_flower = {};
208c0f4b 408 struct tcf_block *block = tp->chain->block;
717503b9 409 bool skip_sw = tc_skip_sw(f->flags);
c24e43d8
VB
410 int err = 0;
411
412 if (!rtnl_held)
413 rtnl_lock();
5b33f488 414
e3ab786b 415 cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
c24e43d8
VB
416 if (!cls_flower.rule) {
417 err = -ENOMEM;
418 goto errout;
419 }
8f256622 420
d6787147 421 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
de4784ca
JP
422 cls_flower.command = TC_CLSFLOWER_REPLACE;
423 cls_flower.cookie = (unsigned long) f;
8f256622
PNA
424 cls_flower.rule->match.dissector = &f->mask->dissector;
425 cls_flower.rule->match.mask = &f->mask->key;
426 cls_flower.rule->match.key = &f->mkey;
384c181e 427 cls_flower.classid = f->res.classid;
5b33f488 428
3a7b6861
PNA
429 err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
430 if (err) {
431 kfree(cls_flower.rule);
c24e43d8 432 if (skip_sw)
1f15bb4f 433 NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
c24e43d8
VB
434 else
435 err = 0;
436 goto errout;
3a7b6861
PNA
437 }
438
aeb3fecd 439 err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw);
8f256622
PNA
440 kfree(cls_flower.rule);
441
717503b9 442 if (err < 0) {
c24e43d8
VB
443 fl_hw_destroy_filter(tp, f, true, NULL);
444 goto errout;
717503b9 445 } else if (err > 0) {
31533cba 446 f->in_hw_count = err;
c24e43d8 447 err = 0;
3d81e711 448 spin_lock(&tp->lock);
caa72601 449 tcf_block_offload_inc(block, &f->flags);
3d81e711 450 spin_unlock(&tp->lock);
717503b9
JP
451 }
452
c24e43d8
VB
453 if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) {
454 err = -EINVAL;
455 goto errout;
456 }
717503b9 457
c049d56e
VB
458 spin_lock(&tp->lock);
459 list_add(&f->hw_list, &head->hw_filters);
460 spin_unlock(&tp->lock);
c24e43d8
VB
461errout:
462 if (!rtnl_held)
463 rtnl_unlock();
464
465 return err;
5b33f488
AV
466}
467
c24e43d8
VB
468static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
469 bool rtnl_held)
10cbc684 470{
de4784ca 471 struct tc_cls_flower_offload cls_flower = {};
208c0f4b 472 struct tcf_block *block = tp->chain->block;
10cbc684 473
c24e43d8
VB
474 if (!rtnl_held)
475 rtnl_lock();
476
d6787147 477 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
de4784ca
JP
478 cls_flower.command = TC_CLSFLOWER_STATS;
479 cls_flower.cookie = (unsigned long) f;
384c181e 480 cls_flower.classid = f->res.classid;
10cbc684 481
aeb3fecd 482 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
3b1903ef
PNA
483
484 tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
485 cls_flower.stats.pkts,
486 cls_flower.stats.lastused);
c24e43d8
VB
487
488 if (!rtnl_held)
489 rtnl_unlock();
10cbc684
AV
490}
491
06177558
VB
492static void __fl_put(struct cls_fl_filter *f)
493{
494 if (!refcount_dec_and_test(&f->refcnt))
495 return;
496
497 if (tcf_exts_get_net(&f->exts))
498 tcf_queue_work(&f->rwork, fl_destroy_filter_work);
499 else
500 __fl_destroy_filter(f);
501}
502
503static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
504{
505 struct cls_fl_filter *f;
506
507 rcu_read_lock();
508 f = idr_find(&head->handle_idr, handle);
509 if (f && !refcount_inc_not_zero(&f->refcnt))
510 f = NULL;
511 rcu_read_unlock();
512
513 return f;
514}
515
516static struct cls_fl_filter *fl_get_next_filter(struct tcf_proto *tp,
517 unsigned long *handle)
518{
519 struct cls_fl_head *head = fl_head_dereference(tp);
520 struct cls_fl_filter *f;
521
522 rcu_read_lock();
523 while ((f = idr_get_next_ul(&head->handle_idr, handle))) {
524 /* don't return filters that are being deleted */
525 if (refcount_inc_not_zero(&f->refcnt))
526 break;
527 ++(*handle);
528 }
529 rcu_read_unlock();
530
531 return f;
532}
533
b2552b8c 534static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
c24e43d8
VB
535 bool *last, bool rtnl_held,
536 struct netlink_ext_ack *extack)
13fa876e 537{
e474619a 538 struct cls_fl_head *head = fl_head_dereference(tp);
c15ab236 539
b2552b8c
VB
540 *last = false;
541
3d81e711
VB
542 spin_lock(&tp->lock);
543 if (f->deleted) {
544 spin_unlock(&tp->lock);
b2552b8c 545 return -ENOENT;
3d81e711 546 }
b2552b8c
VB
547
548 f->deleted = true;
549 rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
550 f->mask->filter_ht_params);
9c160941 551 idr_remove(&head->handle_idr, f->handle);
13fa876e 552 list_del_rcu(&f->list);
3d81e711
VB
553 spin_unlock(&tp->lock);
554
9994677c 555 *last = fl_mask_put(head, f->mask);
79685219 556 if (!tc_skip_hw(f->flags))
c24e43d8 557 fl_hw_destroy_filter(tp, f, rtnl_held, extack);
13fa876e 558 tcf_unbind_filter(tp, &f->res);
06177558 559 __fl_put(f);
05cd271f 560
b2552b8c 561 return 0;
13fa876e
RD
562}
563
d9363774
DB
564static void fl_destroy_sleepable(struct work_struct *work)
565{
aaa908ff
CW
566 struct cls_fl_head *head = container_of(to_rcu_work(work),
567 struct cls_fl_head,
568 rwork);
de9dc650
PB
569
570 rhashtable_destroy(&head->ht);
d9363774
DB
571 kfree(head);
572 module_put(THIS_MODULE);
573}
574
12db03b6
VB
575static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
576 struct netlink_ext_ack *extack)
77b9900e 577{
e474619a 578 struct cls_fl_head *head = fl_head_dereference(tp);
05cd271f 579 struct fl_flow_mask *mask, *next_mask;
77b9900e 580 struct cls_fl_filter *f, *next;
b2552b8c 581 bool last;
77b9900e 582
05cd271f
PB
583 list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
584 list_for_each_entry_safe(f, next, &mask->filters, list) {
c24e43d8 585 __fl_delete(tp, f, &last, rtnl_held, extack);
b2552b8c 586 if (last)
05cd271f
PB
587 break;
588 }
589 }
c15ab236 590 idr_destroy(&head->handle_idr);
d9363774
DB
591
592 __module_get(THIS_MODULE);
aaa908ff 593 tcf_queue_work(&head->rwork, fl_destroy_sleepable);
77b9900e
JP
594}
595
06177558
VB
596static void fl_put(struct tcf_proto *tp, void *arg)
597{
598 struct cls_fl_filter *f = arg;
599
600 __fl_put(f);
601}
602
8113c095 603static void *fl_get(struct tcf_proto *tp, u32 handle)
77b9900e 604{
e474619a 605 struct cls_fl_head *head = fl_head_dereference(tp);
77b9900e 606
06177558 607 return __fl_get(head, handle);
77b9900e
JP
608}
609
610static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
611 [TCA_FLOWER_UNSPEC] = { .type = NLA_UNSPEC },
612 [TCA_FLOWER_CLASSID] = { .type = NLA_U32 },
613 [TCA_FLOWER_INDEV] = { .type = NLA_STRING,
614 .len = IFNAMSIZ },
615 [TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN },
616 [TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN },
617 [TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN },
618 [TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN },
619 [TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 },
620 [TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 },
621 [TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 },
622 [TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 },
623 [TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 },
624 [TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 },
625 [TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
626 [TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
627 [TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) },
628 [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
629 [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 },
630 [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
b175c3a4
JHS
631 [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
632 [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
9399ae9a
HHZ
633 [TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
634 [TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
635 [TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
bc3103f1
AV
636 [TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
637 [TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
638 [TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
639 [TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
640 [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
641 [TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
642 [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
643 [TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
644 [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
aa72d708
OG
645 [TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 },
646 [TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
647 [TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
648 [TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
5976c5f4
SH
649 [TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 },
650 [TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 },
651 [TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 },
652 [TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 },
f4d997fd
HHZ
653 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
654 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
655 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
656 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
faa3ffce
OG
657 [TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 },
658 [TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 },
7b684884
SH
659 [TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 },
660 [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
661 [TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 },
662 [TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
663 [TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 },
664 [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
665 [TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 },
666 [TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
99d31326
SH
667 [TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 },
668 [TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 },
669 [TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 },
670 [TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 },
671 [TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 },
672 [TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 },
673 [TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN },
674 [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN },
675 [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN },
676 [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN },
a577d8f7
BL
677 [TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 },
678 [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 },
679 [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 },
680 [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 },
fdfc7dd6
JP
681 [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 },
682 [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 },
4d80cc0a
OG
683 [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 },
684 [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 },
685 [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 },
686 [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 },
d64efd09
JL
687 [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 },
688 [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 },
689 [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 },
0e2c17b6
OG
690 [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 },
691 [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
692 [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 },
693 [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
0a6e7778
PJV
694 [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED },
695 [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED },
696};
697
698static const struct nla_policy
699enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
700 [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
701};
702
703static const struct nla_policy
704geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
705 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
706 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
707 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
708 .len = 128 },
77b9900e
JP
709};
710
711static void fl_set_key_val(struct nlattr **tb,
712 void *val, int val_type,
713 void *mask, int mask_type, int len)
714{
715 if (!tb[val_type])
716 return;
717 memcpy(val, nla_data(tb[val_type]), len);
718 if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
719 memset(mask, 0xff, len);
720 else
721 memcpy(mask, nla_data(tb[mask_type]), len);
722}
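/* Illustrative note (not part of the source): each flower key is passed as a
 * value/mask attribute pair. If user space omits the mask attribute, the
 * memset(mask, 0xff, len) above turns it into an exact match. For example:
 *
 *	TCA_FLOWER_KEY_ETH_DST = 00:11:22:33:44:55, no mask attribute
 *		-> mask->eth.dst = ff:ff:ff:ff:ff:ff (exact match)
 *	TCA_FLOWER_KEY_ETH_DST_MASK = ff:ff:ff:00:00:00 also supplied
 *		-> only the OUI part of the destination MAC is matched
 */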
723
5c72299f
AN
724static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
725 struct fl_flow_key *mask)
726{
727 fl_set_key_val(tb, &key->tp_min.dst,
728 TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_min.dst,
729 TCA_FLOWER_UNSPEC, sizeof(key->tp_min.dst));
730 fl_set_key_val(tb, &key->tp_max.dst,
731 TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_max.dst,
732 TCA_FLOWER_UNSPEC, sizeof(key->tp_max.dst));
733 fl_set_key_val(tb, &key->tp_min.src,
734 TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_min.src,
735 TCA_FLOWER_UNSPEC, sizeof(key->tp_min.src));
736 fl_set_key_val(tb, &key->tp_max.src,
737 TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_max.src,
738 TCA_FLOWER_UNSPEC, sizeof(key->tp_max.src));
739
740 if ((mask->tp_min.dst && mask->tp_max.dst &&
741 htons(key->tp_max.dst) <= htons(key->tp_min.dst)) ||
742 (mask->tp_min.src && mask->tp_max.src &&
743 htons(key->tp_max.src) <= htons(key->tp_min.src)))
744 return -EINVAL;
745
746 return 0;
747}
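/* Example (illustrative only): a destination port range is accepted only when
 * both min and max are given and max is strictly greater than min; matching
 * in fl_range_port_dst_cmp() is then inclusive, i.e. min <= dst <= max.
 * With a recent iproute2 this is typically expressed as (user-space syntax
 * assumed):
 *
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_proto tcp dst_port 1000-2000 action drop
 */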
748
1a7fca63
BL
749static int fl_set_key_mpls(struct nlattr **tb,
750 struct flow_dissector_key_mpls *key_val,
751 struct flow_dissector_key_mpls *key_mask)
a577d8f7
BL
752{
753 if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
754 key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
755 key_mask->mpls_ttl = MPLS_TTL_MASK;
756 }
757 if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
1a7fca63
BL
758 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
759
760 if (bos & ~MPLS_BOS_MASK)
761 return -EINVAL;
762 key_val->mpls_bos = bos;
a577d8f7
BL
763 key_mask->mpls_bos = MPLS_BOS_MASK;
764 }
765 if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
1a7fca63
BL
766 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
767
768 if (tc & ~MPLS_TC_MASK)
769 return -EINVAL;
770 key_val->mpls_tc = tc;
a577d8f7
BL
771 key_mask->mpls_tc = MPLS_TC_MASK;
772 }
773 if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
1a7fca63
BL
774 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
775
776 if (label & ~MPLS_LABEL_MASK)
777 return -EINVAL;
778 key_val->mpls_label = label;
a577d8f7
BL
779 key_mask->mpls_label = MPLS_LABEL_MASK;
780 }
1a7fca63 781 return 0;
a577d8f7
BL
782}
783
9399ae9a 784static void fl_set_key_vlan(struct nlattr **tb,
aaab0834 785 __be16 ethertype,
d64efd09 786 int vlan_id_key, int vlan_prio_key,
9399ae9a
HHZ
787 struct flow_dissector_key_vlan *key_val,
788 struct flow_dissector_key_vlan *key_mask)
789{
790#define VLAN_PRIORITY_MASK 0x7
791
d64efd09 792 if (tb[vlan_id_key]) {
9399ae9a 793 key_val->vlan_id =
d64efd09 794 nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
9399ae9a
HHZ
795 key_mask->vlan_id = VLAN_VID_MASK;
796 }
d64efd09 797 if (tb[vlan_prio_key]) {
9399ae9a 798 key_val->vlan_priority =
d64efd09 799 nla_get_u8(tb[vlan_prio_key]) &
9399ae9a
HHZ
800 VLAN_PRIORITY_MASK;
801 key_mask->vlan_priority = VLAN_PRIORITY_MASK;
802 }
aaab0834
JL
803 key_val->vlan_tpid = ethertype;
804 key_mask->vlan_tpid = cpu_to_be16(~0);
9399ae9a
HHZ
805}
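/* Illustrative example (not part of the source; iproute2 syntax, device name
 * is a placeholder): matching the outer 802.1Q tag fills key->vlan and
 * mask->vlan via the helper above:
 *
 *	tc filter add dev eth0 ingress protocol 802.1Q flower \
 *		vlan_id 100 vlan_prio 3 vlan_ethtype ipv4 action pass
 *
 * vlan_id is masked with VLAN_VID_MASK (12 bits) and vlan_prio with
 * VLAN_PRIORITY_MASK (3 bits); the TPID mask is always exact.
 */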
806
faa3ffce
OG
807static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
808 u32 *dissector_key, u32 *dissector_mask,
809 u32 flower_flag_bit, u32 dissector_flag_bit)
810{
811 if (flower_mask & flower_flag_bit) {
812 *dissector_mask |= dissector_flag_bit;
813 if (flower_key & flower_flag_bit)
814 *dissector_key |= dissector_flag_bit;
815 }
816}
817
d9724772
OG
818static int fl_set_key_flags(struct nlattr **tb,
819 u32 *flags_key, u32 *flags_mask)
faa3ffce
OG
820{
821 u32 key, mask;
822
d9724772
OG
823 /* mask is mandatory for flags */
824 if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
825 return -EINVAL;
faa3ffce
OG
826
827 key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
d9724772 828 mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
faa3ffce
OG
829
830 *flags_key = 0;
831 *flags_mask = 0;
832
833 fl_set_key_flag(key, mask, flags_key, flags_mask,
834 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
459d153d
PJV
835 fl_set_key_flag(key, mask, flags_key, flags_mask,
836 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
837 FLOW_DIS_FIRST_FRAG);
d9724772
OG
838
839 return 0;
faa3ffce
OG
840}
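/* Worked example (illustrative only): the mask attribute is mandatory, and
 * each flag is copied into the dissector only when its mask bit is set:
 *
 *	key = IS_FRAGMENT, mask = IS_FRAGMENT -> match only fragments
 *	key = 0,           mask = IS_FRAGMENT -> match only non-fragments
 *	mask bit clear                        -> flag is ignored entirely
 */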
841
0e2c17b6 842static void fl_set_key_ip(struct nlattr **tb, bool encap,
4d80cc0a
OG
843 struct flow_dissector_key_ip *key,
844 struct flow_dissector_key_ip *mask)
845{
0e2c17b6
OG
846 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
847 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
848 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
849 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
4d80cc0a 850
0e2c17b6
OG
851 fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
852 fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
4d80cc0a
OG
853}
854
0a6e7778
PJV
855static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
856 int depth, int option_len,
857 struct netlink_ext_ack *extack)
858{
859 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
860 struct nlattr *class = NULL, *type = NULL, *data = NULL;
861 struct geneve_opt *opt;
862 int err, data_len = 0;
863
864 if (option_len > sizeof(struct geneve_opt))
865 data_len = option_len - sizeof(struct geneve_opt);
866
867 opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
868 memset(opt, 0xff, option_len);
869 opt->length = data_len / 4;
870 opt->r1 = 0;
871 opt->r2 = 0;
872 opt->r3 = 0;
873
 874 /* If no mask has been provided we assume an exact match. */
875 if (!depth)
876 return sizeof(struct geneve_opt) + data_len;
877
878 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
879 NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
880 return -EINVAL;
881 }
882
8cb08174
JB
883 err = nla_parse_nested_deprecated(tb,
884 TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
885 nla, geneve_opt_policy, extack);
0a6e7778
PJV
886 if (err < 0)
887 return err;
888
889 /* We are not allowed to omit any of CLASS, TYPE or DATA
890 * fields from the key.
891 */
892 if (!option_len &&
893 (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
894 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
895 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
896 NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
897 return -EINVAL;
898 }
899
900 /* Omitting any of CLASS, TYPE or DATA fields is allowed
901 * for the mask.
902 */
903 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
904 int new_len = key->enc_opts.len;
905
906 data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
907 data_len = nla_len(data);
908 if (data_len < 4) {
909 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
910 return -ERANGE;
911 }
912 if (data_len % 4) {
913 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
914 return -ERANGE;
915 }
916
917 new_len += sizeof(struct geneve_opt) + data_len;
918 BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
919 if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
920 NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
921 return -ERANGE;
922 }
923 opt->length = data_len / 4;
924 memcpy(opt->opt_data, nla_data(data), data_len);
925 }
926
927 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
928 class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
929 opt->opt_class = nla_get_be16(class);
930 }
931
932 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
933 type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
934 opt->type = nla_get_u8(type);
935 }
936
937 return sizeof(struct geneve_opt) + data_len;
938}
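/* Illustrative note (not part of the source): a GENEVE option key consists of
 * a 16-bit class, an 8-bit type and option data whose length must be a
 * multiple of 4 bytes (at least 4); the accumulated TLVs are capped at
 * FLOW_DIS_TUN_OPTS_MAX bytes. With iproute2 this is typically written as
 * class:type:data in hex, e.g. "geneve_opts 0102:80:00112233" (user-space
 * syntax assumed, not verified here).
 */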
939
940static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
941 struct fl_flow_key *mask,
942 struct netlink_ext_ack *extack)
943{
944 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
63c82997
JK
945 int err, option_len, key_depth, msk_depth = 0;
946
8cb08174
JB
947 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
948 TCA_FLOWER_KEY_ENC_OPTS_MAX,
949 enc_opts_policy, extack);
63c82997
JK
950 if (err)
951 return err;
0a6e7778
PJV
952
953 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
954
955 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
8cb08174
JB
956 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
957 TCA_FLOWER_KEY_ENC_OPTS_MAX,
958 enc_opts_policy, extack);
63c82997
JK
959 if (err)
960 return err;
961
0a6e7778
PJV
962 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
963 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
964 }
965
966 nla_for_each_attr(nla_opt_key, nla_enc_key,
967 nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
968 switch (nla_type(nla_opt_key)) {
969 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
970 option_len = 0;
971 key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
972 option_len = fl_set_geneve_opt(nla_opt_key, key,
973 key_depth, option_len,
974 extack);
975 if (option_len < 0)
976 return option_len;
977
978 key->enc_opts.len += option_len;
979 /* At the same time we need to parse through the mask
980 * in order to verify exact and mask attribute lengths.
981 */
982 mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
983 option_len = fl_set_geneve_opt(nla_opt_msk, mask,
984 msk_depth, option_len,
985 extack);
986 if (option_len < 0)
987 return option_len;
988
989 mask->enc_opts.len += option_len;
990 if (key->enc_opts.len != mask->enc_opts.len) {
991 NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
992 return -EINVAL;
993 }
994
995 if (msk_depth)
996 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
997 break;
998 default:
999 NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1000 return -EINVAL;
1001 }
1002 }
1003
1004 return 0;
1005}
1006
77b9900e 1007static int fl_set_key(struct net *net, struct nlattr **tb,
1057c55f
AA
1008 struct fl_flow_key *key, struct fl_flow_key *mask,
1009 struct netlink_ext_ack *extack)
77b9900e 1010{
9399ae9a 1011 __be16 ethertype;
d9724772 1012 int ret = 0;
dd3aa3b5 1013#ifdef CONFIG_NET_CLS_IND
77b9900e 1014 if (tb[TCA_FLOWER_INDEV]) {
1057c55f 1015 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
77b9900e
JP
1016 if (err < 0)
1017 return err;
1018 key->indev_ifindex = err;
1019 mask->indev_ifindex = 0xffffffff;
1020 }
dd3aa3b5 1021#endif
77b9900e
JP
1022
1023 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1024 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1025 sizeof(key->eth.dst));
1026 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1027 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1028 sizeof(key->eth.src));
66530bdf 1029
0b498a52 1030 if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
9399ae9a
HHZ
1031 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1032
aaab0834 1033 if (eth_type_vlan(ethertype)) {
d64efd09
JL
1034 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1035 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
1036 &mask->vlan);
1037
5e9a0fe4
JL
1038 if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1039 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1040 if (eth_type_vlan(ethertype)) {
1041 fl_set_key_vlan(tb, ethertype,
1042 TCA_FLOWER_KEY_CVLAN_ID,
1043 TCA_FLOWER_KEY_CVLAN_PRIO,
1044 &key->cvlan, &mask->cvlan);
1045 fl_set_key_val(tb, &key->basic.n_proto,
1046 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1047 &mask->basic.n_proto,
1048 TCA_FLOWER_UNSPEC,
1049 sizeof(key->basic.n_proto));
1050 } else {
1051 key->basic.n_proto = ethertype;
1052 mask->basic.n_proto = cpu_to_be16(~0);
1053 }
d64efd09 1054 }
0b498a52
AB
1055 } else {
1056 key->basic.n_proto = ethertype;
1057 mask->basic.n_proto = cpu_to_be16(~0);
1058 }
9399ae9a 1059 }
66530bdf 1060
77b9900e
JP
1061 if (key->basic.n_proto == htons(ETH_P_IP) ||
1062 key->basic.n_proto == htons(ETH_P_IPV6)) {
1063 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1064 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1065 sizeof(key->basic.ip_proto));
0e2c17b6 1066 fl_set_key_ip(tb, false, &key->ip, &mask->ip);
77b9900e 1067 }
66530bdf
JHS
1068
1069 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1070 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
970bfcd0 1071 mask->control.addr_type = ~0;
77b9900e
JP
1072 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1073 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1074 sizeof(key->ipv4.src));
1075 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1076 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1077 sizeof(key->ipv4.dst));
66530bdf
JHS
1078 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1079 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
970bfcd0 1080 mask->control.addr_type = ~0;
77b9900e
JP
1081 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1082 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1083 sizeof(key->ipv6.src));
1084 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1085 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1086 sizeof(key->ipv6.dst));
1087 }
66530bdf 1088
77b9900e
JP
1089 if (key->basic.ip_proto == IPPROTO_TCP) {
1090 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
aa72d708 1091 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
77b9900e
JP
1092 sizeof(key->tp.src));
1093 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
aa72d708 1094 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
77b9900e 1095 sizeof(key->tp.dst));
fdfc7dd6
JP
1096 fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1097 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1098 sizeof(key->tcp.flags));
77b9900e
JP
1099 } else if (key->basic.ip_proto == IPPROTO_UDP) {
1100 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
aa72d708 1101 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
77b9900e
JP
1102 sizeof(key->tp.src));
1103 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
aa72d708 1104 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
77b9900e 1105 sizeof(key->tp.dst));
5976c5f4
SH
1106 } else if (key->basic.ip_proto == IPPROTO_SCTP) {
1107 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1108 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1109 sizeof(key->tp.src));
1110 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1111 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1112 sizeof(key->tp.dst));
7b684884
SH
1113 } else if (key->basic.n_proto == htons(ETH_P_IP) &&
1114 key->basic.ip_proto == IPPROTO_ICMP) {
1115 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1116 &mask->icmp.type,
1117 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1118 sizeof(key->icmp.type));
1119 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1120 &mask->icmp.code,
1121 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1122 sizeof(key->icmp.code));
1123 } else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1124 key->basic.ip_proto == IPPROTO_ICMPV6) {
1125 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1126 &mask->icmp.type,
1127 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1128 sizeof(key->icmp.type));
040587af 1129 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
7b684884 1130 &mask->icmp.code,
040587af 1131 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
7b684884 1132 sizeof(key->icmp.code));
a577d8f7
BL
1133 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1134 key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1a7fca63
BL
1135 ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
1136 if (ret)
1137 return ret;
99d31326
SH
1138 } else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1139 key->basic.n_proto == htons(ETH_P_RARP)) {
1140 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1141 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1142 sizeof(key->arp.sip));
1143 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1144 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1145 sizeof(key->arp.tip));
1146 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1147 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1148 sizeof(key->arp.op));
1149 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1150 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1151 sizeof(key->arp.sha));
1152 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1153 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1154 sizeof(key->arp.tha));
77b9900e
JP
1155 }
1156
5c72299f
AN
1157 if (key->basic.ip_proto == IPPROTO_TCP ||
1158 key->basic.ip_proto == IPPROTO_UDP ||
1159 key->basic.ip_proto == IPPROTO_SCTP) {
1160 ret = fl_set_key_port_range(tb, key, mask);
1161 if (ret)
1162 return ret;
1163 }
1164
bc3103f1
AV
1165 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1166 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1167 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
970bfcd0 1168 mask->enc_control.addr_type = ~0;
bc3103f1
AV
1169 fl_set_key_val(tb, &key->enc_ipv4.src,
1170 TCA_FLOWER_KEY_ENC_IPV4_SRC,
1171 &mask->enc_ipv4.src,
1172 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1173 sizeof(key->enc_ipv4.src));
1174 fl_set_key_val(tb, &key->enc_ipv4.dst,
1175 TCA_FLOWER_KEY_ENC_IPV4_DST,
1176 &mask->enc_ipv4.dst,
1177 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1178 sizeof(key->enc_ipv4.dst));
1179 }
1180
1181 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1182 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1183 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
970bfcd0 1184 mask->enc_control.addr_type = ~0;
bc3103f1
AV
1185 fl_set_key_val(tb, &key->enc_ipv6.src,
1186 TCA_FLOWER_KEY_ENC_IPV6_SRC,
1187 &mask->enc_ipv6.src,
1188 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1189 sizeof(key->enc_ipv6.src));
1190 fl_set_key_val(tb, &key->enc_ipv6.dst,
1191 TCA_FLOWER_KEY_ENC_IPV6_DST,
1192 &mask->enc_ipv6.dst,
1193 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1194 sizeof(key->enc_ipv6.dst));
1195 }
1196
1197 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
eb523f42 1198 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
bc3103f1
AV
1199 sizeof(key->enc_key_id.keyid));
1200
f4d997fd
HHZ
1201 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1202 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1203 sizeof(key->enc_tp.src));
1204
1205 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1206 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1207 sizeof(key->enc_tp.dst));
1208
0e2c17b6
OG
1209 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1210
0a6e7778
PJV
1211 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1212 ret = fl_set_enc_opt(tb, key, mask, extack);
1213 if (ret)
1214 return ret;
1215 }
1216
d9724772
OG
1217 if (tb[TCA_FLOWER_KEY_FLAGS])
1218 ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
faa3ffce 1219
d9724772 1220 return ret;
77b9900e
JP
1221}
1222
05cd271f
PB
1223static void fl_mask_copy(struct fl_flow_mask *dst,
1224 struct fl_flow_mask *src)
77b9900e 1225{
05cd271f
PB
1226 const void *psrc = fl_key_get_start(&src->key, src);
1227 void *pdst = fl_key_get_start(&dst->key, src);
77b9900e 1228
05cd271f
PB
1229 memcpy(pdst, psrc, fl_mask_range(src));
1230 dst->range = src->range;
77b9900e
JP
1231}
1232
1233static const struct rhashtable_params fl_ht_params = {
1234 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1235 .head_offset = offsetof(struct cls_fl_filter, ht_node),
1236 .automatic_shrinking = true,
1237};
1238
05cd271f 1239static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
77b9900e 1240{
05cd271f
PB
1241 mask->filter_ht_params = fl_ht_params;
1242 mask->filter_ht_params.key_len = fl_mask_range(mask);
1243 mask->filter_ht_params.key_offset += mask->range.start;
77b9900e 1244
05cd271f 1245 return rhashtable_init(&mask->ht, &mask->filter_ht_params);
77b9900e
JP
1246}
1247
1248#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
cb205a81 1249#define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)
77b9900e 1250
339ba878
HHZ
1251#define FL_KEY_IS_MASKED(mask, member) \
1252 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
1253 0, FL_KEY_MEMBER_SIZE(member)) \
77b9900e
JP
1254
1255#define FL_KEY_SET(keys, cnt, id, member) \
1256 do { \
1257 keys[cnt].key_id = id; \
1258 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
1259 cnt++; \
1260 } while(0);
1261
339ba878 1262#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
77b9900e 1263 do { \
339ba878 1264 if (FL_KEY_IS_MASKED(mask, member)) \
77b9900e
JP
1265 FL_KEY_SET(keys, cnt, id, member); \
1266 } while(0);
1267
33fb5cba
JP
1268static void fl_init_dissector(struct flow_dissector *dissector,
1269 struct fl_flow_key *mask)
77b9900e
JP
1270{
1271 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1272 size_t cnt = 0;
1273
42aecaa9 1274 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
77b9900e 1275 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
33fb5cba 1276 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
339ba878 1277 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
33fb5cba 1278 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
339ba878 1279 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
33fb5cba 1280 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
339ba878 1281 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
5c72299f
AN
1282 if (FL_KEY_IS_MASKED(mask, tp) ||
1283 FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max))
1284 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp);
33fb5cba 1285 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
4d80cc0a 1286 FLOW_DISSECTOR_KEY_IP, ip);
33fb5cba 1287 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
fdfc7dd6 1288 FLOW_DISSECTOR_KEY_TCP, tcp);
33fb5cba 1289 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
7b684884 1290 FLOW_DISSECTOR_KEY_ICMP, icmp);
33fb5cba 1291 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
99d31326 1292 FLOW_DISSECTOR_KEY_ARP, arp);
33fb5cba 1293 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
a577d8f7 1294 FLOW_DISSECTOR_KEY_MPLS, mpls);
33fb5cba 1295 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
9399ae9a 1296 FLOW_DISSECTOR_KEY_VLAN, vlan);
33fb5cba 1297 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
d64efd09 1298 FLOW_DISSECTOR_KEY_CVLAN, cvlan);
33fb5cba 1299 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
519d1052 1300 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
33fb5cba 1301 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
519d1052 1302 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
33fb5cba 1303 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
519d1052 1304 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
33fb5cba
JP
1305 if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1306 FL_KEY_IS_MASKED(mask, enc_ipv6))
519d1052
HHZ
1307 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1308 enc_control);
33fb5cba 1309 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
f4d997fd 1310 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
33fb5cba 1311 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
0e2c17b6 1312 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
0a6e7778
PJV
1313 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1314 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
77b9900e 1315
33fb5cba 1316 skb_flow_dissector_init(dissector, keys, cnt);
05cd271f
PB
1317}
1318
1319static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1320 struct fl_flow_mask *mask)
1321{
1322 struct fl_flow_mask *newmask;
1323 int err;
1324
1325 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1326 if (!newmask)
1327 return ERR_PTR(-ENOMEM);
1328
1329 fl_mask_copy(newmask, mask);
1330
5c72299f
AN
1331 if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) ||
1332 (newmask->key.tp_min.src && newmask->key.tp_max.src))
1333 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1334
05cd271f
PB
1335 err = fl_init_mask_hashtable(newmask);
1336 if (err)
1337 goto errout_free;
1338
33fb5cba 1339 fl_init_dissector(&newmask->dissector, &newmask->key);
05cd271f
PB
1340
1341 INIT_LIST_HEAD_RCU(&newmask->filters);
1342
f48ef4d5 1343 refcount_set(&newmask->refcnt, 1);
195c234d
VB
1344 err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1345 &newmask->ht_node, mask_ht_params);
05cd271f
PB
1346 if (err)
1347 goto errout_destroy;
1348
195c234d
VB
1349 /* Wait until any potential concurrent users of mask are finished */
1350 synchronize_rcu();
1351
259e60f9 1352 spin_lock(&head->masks_lock);
05cd271f 1353 list_add_tail_rcu(&newmask->list, &head->masks);
259e60f9 1354 spin_unlock(&head->masks_lock);
05cd271f
PB
1355
1356 return newmask;
1357
1358errout_destroy:
1359 rhashtable_destroy(&newmask->ht);
1360errout_free:
1361 kfree(newmask);
1362
1363 return ERR_PTR(err);
77b9900e
JP
1364}
1365
1366static int fl_check_assign_mask(struct cls_fl_head *head,
05cd271f
PB
1367 struct cls_fl_filter *fnew,
1368 struct cls_fl_filter *fold,
77b9900e
JP
1369 struct fl_flow_mask *mask)
1370{
05cd271f 1371 struct fl_flow_mask *newmask;
f48ef4d5 1372 int ret = 0;
77b9900e 1373
f48ef4d5 1374 rcu_read_lock();
195c234d
VB
1375
1376 /* Insert mask as temporary node to prevent concurrent creation of mask
1377 * with same key. Any concurrent lookups with same key will return
1378 * -EAGAIN because mask's refcnt is zero. It is safe to insert
1379 * stack-allocated 'mask' to masks hash table because we call
1380 * synchronize_rcu() before returning from this function (either in case
1381 * of error or after replacing it with heap-allocated mask in
1382 * fl_create_new_mask()).
1383 */
1384 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1385 &mask->ht_node,
1386 mask_ht_params);
05cd271f 1387 if (!fnew->mask) {
f48ef4d5
VB
1388 rcu_read_unlock();
1389
195c234d
VB
1390 if (fold) {
1391 ret = -EINVAL;
1392 goto errout_cleanup;
1393 }
77b9900e 1394
05cd271f 1395 newmask = fl_create_new_mask(head, mask);
195c234d
VB
1396 if (IS_ERR(newmask)) {
1397 ret = PTR_ERR(newmask);
1398 goto errout_cleanup;
1399 }
77b9900e 1400
05cd271f 1401 fnew->mask = newmask;
f48ef4d5 1402 return 0;
195c234d
VB
1403 } else if (IS_ERR(fnew->mask)) {
1404 ret = PTR_ERR(fnew->mask);
f6521c58 1405 } else if (fold && fold->mask != fnew->mask) {
f48ef4d5
VB
1406 ret = -EINVAL;
1407 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1408 /* Mask was deleted concurrently, try again */
1409 ret = -EAGAIN;
05cd271f 1410 }
f48ef4d5
VB
1411 rcu_read_unlock();
1412 return ret;
195c234d
VB
1413
1414errout_cleanup:
1415 rhashtable_remove_fast(&head->ht, &mask->ht_node,
1416 mask_ht_params);
1417 /* Wait until any potential concurrent users of mask are finished */
1418 synchronize_rcu();
1419 return ret;
77b9900e
JP
1420}
1421
1422static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1423 struct cls_fl_filter *f, struct fl_flow_mask *mask,
1424 unsigned long base, struct nlattr **tb,
50a56190 1425 struct nlattr *est, bool ovr,
c24e43d8 1426 struct fl_flow_tmplt *tmplt, bool rtnl_held,
50a56190 1427 struct netlink_ext_ack *extack)
77b9900e 1428{
77b9900e
JP
1429 int err;
1430
c24e43d8 1431 err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
ec6743a1 1432 extack);
77b9900e
JP
1433 if (err < 0)
1434 return err;
1435
1436 if (tb[TCA_FLOWER_CLASSID]) {
1437 f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
c24e43d8
VB
1438 if (!rtnl_held)
1439 rtnl_lock();
77b9900e 1440 tcf_bind_filter(tp, &f->res, base);
c24e43d8
VB
1441 if (!rtnl_held)
1442 rtnl_unlock();
77b9900e
JP
1443 }
1444
1057c55f 1445 err = fl_set_key(net, tb, &f->key, &mask->key, extack);
77b9900e 1446 if (err)
45507529 1447 return err;
77b9900e
JP
1448
1449 fl_mask_update_range(mask);
1450 fl_set_masked_key(&f->mkey, &f->key, mask);
1451
b95ec7eb
JP
1452 if (!fl_mask_fits_tmplt(tmplt, mask)) {
1453 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1454 return -EINVAL;
1455 }
1456
77b9900e 1457 return 0;
77b9900e
JP
1458}
1459
1f17f774
VB
1460static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1461 struct cls_fl_filter *fold,
1462 bool *in_ht)
1463{
1464 struct fl_flow_mask *mask = fnew->mask;
1465 int err;
1466
9e35552a
VB
1467 err = rhashtable_lookup_insert_fast(&mask->ht,
1468 &fnew->ht_node,
1469 mask->filter_ht_params);
1f17f774
VB
1470 if (err) {
1471 *in_ht = false;
1472 /* It is okay if filter with same key exists when
1473 * overwriting.
1474 */
1475 return fold && err == -EEXIST ? 0 : err;
1476 }
1477
1478 *in_ht = true;
1479 return 0;
1480}
1481
77b9900e
JP
1482static int fl_change(struct net *net, struct sk_buff *in_skb,
1483 struct tcf_proto *tp, unsigned long base,
1484 u32 handle, struct nlattr **tca,
12db03b6
VB
1485 void **arg, bool ovr, bool rtnl_held,
1486 struct netlink_ext_ack *extack)
77b9900e 1487{
e474619a 1488 struct cls_fl_head *head = fl_head_dereference(tp);
8113c095 1489 struct cls_fl_filter *fold = *arg;
77b9900e 1490 struct cls_fl_filter *fnew;
2cddd201 1491 struct fl_flow_mask *mask;
39b7b6a6 1492 struct nlattr **tb;
1f17f774 1493 bool in_ht;
77b9900e
JP
1494 int err;
1495
06177558
VB
1496 if (!tca[TCA_OPTIONS]) {
1497 err = -EINVAL;
1498 goto errout_fold;
1499 }
77b9900e 1500
2cddd201 1501 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
06177558
VB
1502 if (!mask) {
1503 err = -ENOBUFS;
1504 goto errout_fold;
1505 }
39b7b6a6 1506
2cddd201
IV
1507 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1508 if (!tb) {
1509 err = -ENOBUFS;
1510 goto errout_mask_alloc;
1511 }
1512
8cb08174
JB
1513 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1514 tca[TCA_OPTIONS], fl_policy, NULL);
77b9900e 1515 if (err < 0)
39b7b6a6 1516 goto errout_tb;
77b9900e 1517
39b7b6a6
AB
1518 if (fold && handle && fold->handle != handle) {
1519 err = -EINVAL;
1520 goto errout_tb;
1521 }
77b9900e
JP
1522
1523 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
39b7b6a6
AB
1524 if (!fnew) {
1525 err = -ENOBUFS;
1526 goto errout_tb;
1527 }
c049d56e 1528 INIT_LIST_HEAD(&fnew->hw_list);
06177558 1529 refcount_set(&fnew->refcnt, 1);
77b9900e 1530
14215108 1531 err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
b9a24bb7
WC
1532 if (err < 0)
1533 goto errout;
77b9900e 1534
e69985c6
AV
1535 if (tb[TCA_FLOWER_FLAGS]) {
1536 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1537
1538 if (!tc_flags_valid(fnew->flags)) {
1539 err = -EINVAL;
ecb3dea4 1540 goto errout;
e69985c6
AV
1541 }
1542 }
5b33f488 1543
2cddd201 1544 err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
c24e43d8 1545 tp->chain->tmplt_priv, rtnl_held, extack);
77b9900e 1546 if (err)
ecb3dea4 1547 goto errout;
77b9900e 1548
2cddd201 1549 err = fl_check_assign_mask(head, fnew, fold, mask);
77b9900e 1550 if (err)
ecb3dea4
VB
1551 goto errout;
1552
1f17f774
VB
1553 err = fl_ht_insert_unique(fnew, fold, &in_ht);
1554 if (err)
1555 goto errout_mask;
1556
79685219 1557 if (!tc_skip_hw(fnew->flags)) {
c24e43d8 1558 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
79685219 1559 if (err)
1f17f774 1560 goto errout_ht;
79685219 1561 }
5b33f488 1562
55593960
OG
1563 if (!tc_in_hw(fnew->flags))
1564 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
1565
3d81e711
VB
1566 spin_lock(&tp->lock);
1567
272ffaad
VB
1568 /* tp was deleted concurrently. -EAGAIN will cause caller to lookup
1569 * proto again or create new one, if necessary.
1570 */
1571 if (tp->deleting) {
1572 err = -EAGAIN;
1573 goto errout_hw;
1574 }
1575
5b33f488 1576 if (fold) {
b2552b8c
VB
1577 /* Fold filter was deleted concurrently. Retry lookup. */
1578 if (fold->deleted) {
1579 err = -EAGAIN;
1580 goto errout_hw;
1581 }
1582
620da486
VB
1583 fnew->handle = handle;
1584
1f17f774
VB
1585 if (!in_ht) {
1586 struct rhashtable_params params =
1587 fnew->mask->filter_ht_params;
1588
1589 err = rhashtable_insert_fast(&fnew->mask->ht,
1590 &fnew->ht_node,
1591 params);
1592 if (err)
1593 goto errout_hw;
1594 in_ht = true;
1595 }
620da486 1596
c049d56e 1597 refcount_inc(&fnew->refcnt);
599d2570
RD
1598 rhashtable_remove_fast(&fold->mask->ht,
1599 &fold->ht_node,
1600 fold->mask->filter_ht_params);
234a4624 1601 idr_replace(&head->handle_idr, fnew, fnew->handle);
ff3532f2 1602 list_replace_rcu(&fold->list, &fnew->list);
b2552b8c 1603 fold->deleted = true;
620da486 1604
3d81e711
VB
1605 spin_unlock(&tp->lock);
1606
9994677c 1607 fl_mask_put(head, fold->mask);
620da486 1608 if (!tc_skip_hw(fold->flags))
c24e43d8 1609 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
77b9900e 1610 tcf_unbind_filter(tp, &fold->res);
06177558
VB
1611 /* Caller holds reference to fold, so refcnt is always > 0
1612 * after this.
1613 */
1614 refcount_dec(&fold->refcnt);
1615 __fl_put(fold);
77b9900e 1616 } else {
620da486
VB
1617 if (handle) {
1618 /* user specified a handle that does not exist yet */
1619 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1620 handle, GFP_ATOMIC);
9a2d9389
VB
1621
1622 /* A filter with the specified handle was concurrently
1623 * inserted after the initial check in cls_api. This is not
1624 * necessarily an error if NLM_F_EXCL is not set in the
1625 * message flags. Returning -EAGAIN will cause cls_api to
1626 * try to update the concurrently inserted rule.
1627 */
1628 if (err == -ENOSPC)
1629 err = -EAGAIN;
620da486
VB
1630 } else {
1631 handle = 1;
1632 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1633 INT_MAX, GFP_ATOMIC);
1634 }
1635 if (err)
1636 goto errout_hw;
1637
c049d56e 1638 refcount_inc(&fnew->refcnt);
620da486 1639 fnew->handle = handle;
05cd271f 1640 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
3d81e711 1641 spin_unlock(&tp->lock);
77b9900e
JP
1642 }
1643
620da486
VB
1644 *arg = fnew;
1645
39b7b6a6 1646 kfree(tb);
2cddd201 1647 kfree(mask);
77b9900e
JP
1648 return 0;
1649
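/* Error unwind: undo in reverse order of setup -- mark fnew deleted and
 * remove its hw offload, take it out of the mask hash table if it was
 * inserted, put the mask and fnew references, free the temporary tb and
 * mask buffers, and finally drop the reference on fold.
 */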
c049d56e
VB
1650errout_ht:
1651 spin_lock(&tp->lock);
620da486 1652errout_hw:
c049d56e 1653 fnew->deleted = true;
3d81e711 1654 spin_unlock(&tp->lock);
620da486 1655 if (!tc_skip_hw(fnew->flags))
c24e43d8 1656 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
1f17f774
VB
1657 if (in_ht)
1658 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
1659 fnew->mask->filter_ht_params);
ecb3dea4 1660errout_mask:
9994677c 1661 fl_mask_put(head, fnew->mask);
77b9900e 1662errout:
c049d56e 1663 __fl_put(fnew);
39b7b6a6
AB
1664errout_tb:
1665 kfree(tb);
2cddd201
IV
1666errout_mask_alloc:
1667 kfree(mask);
06177558
VB
1668errout_fold:
1669 if (fold)
1670 __fl_put(fold);
77b9900e
JP
1671 return err;
1672}
1673
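/* Delete one filter. *last tells the caller whether this classifier has
 * no filters left (no masks remain), so it can destroy the tcf_proto.
 */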
571acf21 1674static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
12db03b6 1675 bool rtnl_held, struct netlink_ext_ack *extack)
77b9900e 1676{
e474619a 1677 struct cls_fl_head *head = fl_head_dereference(tp);
8113c095 1678 struct cls_fl_filter *f = arg;
b2552b8c
VB
1679 bool last_on_mask;
1680 int err = 0;
77b9900e 1681
c24e43d8 1682 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
05cd271f 1683 *last = list_empty(&head->masks);
06177558
VB
1684 __fl_put(f);
1685
b2552b8c 1686 return err;
77b9900e
JP
1687}
1688
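/* Walk all filters. arg->cookie is used as the iteration cursor so the
 * walk survives concurrent inserts and deletes; each filter is handed to
 * the callback with a temporary reference that is dropped afterwards.
 */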
12db03b6
VB
1689static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
1690 bool rtnl_held)
77b9900e 1691{
77b9900e 1692 struct cls_fl_filter *f;
05cd271f 1693
01683a14
VB
1694 arg->count = arg->skip;
1695
06177558 1696 while ((f = fl_get_next_filter(tp, &arg->cookie)) != NULL) {
01683a14 1697 if (arg->fn(tp, f, arg) < 0) {
06177558 1698 __fl_put(f);
01683a14
VB
1699 arg->stop = 1;
1700 break;
05cd271f 1701 }
06177558
VB
1702 __fl_put(f);
1703 arg->cookie++;
01683a14 1704 arg->count++;
77b9900e
JP
1705 }
1706}
1707
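/* Return the next offloaded filter after f (or the first one when f is
 * NULL) with its refcount raised; when adding, filters already marked
 * deleted are skipped. Used by fl_reoffload() below.
 */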
c049d56e
VB
1708static struct cls_fl_filter *
1709fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
1710{
1711 struct cls_fl_head *head = fl_head_dereference(tp);
1712
1713 spin_lock(&tp->lock);
1714 if (list_empty(&head->hw_filters)) {
1715 spin_unlock(&tp->lock);
1716 return NULL;
1717 }
1718
1719 if (!f)
1720 f = list_entry(&head->hw_filters, struct cls_fl_filter,
1721 hw_list);
1722 list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
1723 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
1724 spin_unlock(&tp->lock);
1725 return f;
1726 }
1727 }
1728
1729 spin_unlock(&tp->lock);
1730 return NULL;
1731}
1732
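/* Replay every offloaded filter to a single driver callback, e.g. when a
 * block callback is (un)registered; "add" selects between the
 * TC_CLSFLOWER_REPLACE and TC_CLSFLOWER_DESTROY commands.
 */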
31533cba
JH
1733static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
1734 void *cb_priv, struct netlink_ext_ack *extack)
1735{
31533cba
JH
1736 struct tc_cls_flower_offload cls_flower = {};
1737 struct tcf_block *block = tp->chain->block;
c049d56e 1738 struct cls_fl_filter *f = NULL;
31533cba
JH
1739 int err;
1740
c049d56e
VB
1741 /* hw_filters list can only be changed by hw offload functions after
1742 * obtaining rtnl lock. Make sure it is not changed while reoffload is
1743 * iterating it.
1744 */
1745 ASSERT_RTNL();
3a7b6861 1746
c049d56e 1747 while ((f = fl_get_next_hw_filter(tp, f, add))) {
95e27a4d
JH
1748 cls_flower.rule =
1749 flow_rule_alloc(tcf_exts_num_actions(&f->exts));
1750 if (!cls_flower.rule) {
1751 __fl_put(f);
1752 return -ENOMEM;
1753 }
31533cba 1754
95e27a4d 1755 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
d6787147 1756 extack);
95e27a4d
JH
1757 cls_flower.command = add ?
1758 TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
1759 cls_flower.cookie = (unsigned long)f;
1760 cls_flower.rule->match.dissector = &f->mask->dissector;
1761 cls_flower.rule->match.mask = &f->mask->key;
1762 cls_flower.rule->match.key = &f->mkey;
1763
1764 err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
1765 if (err) {
8f256622 1766 kfree(cls_flower.rule);
95e27a4d
JH
1767 if (tc_skip_sw(f->flags)) {
1768 NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
1769 __fl_put(f);
1770 return err;
31533cba 1771 }
95e27a4d
JH
1772 goto next_flow;
1773 }
31533cba 1774
95e27a4d
JH
1775 cls_flower.classid = f->res.classid;
1776
1777 err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
1778 kfree(cls_flower.rule);
1779
1780 if (err) {
1781 if (add && tc_skip_sw(f->flags)) {
1782 __fl_put(f);
1783 return err;
1784 }
1785 goto next_flow;
31533cba 1786 }
95e27a4d
JH
1787
1788 spin_lock(&tp->lock);
1789 tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags,
1790 add);
1791 spin_unlock(&tp->lock);
1792next_flow:
95e27a4d 1793 __fl_put(f);
31533cba
JH
1794 }
1795
1796 return 0;
1797}
1798
8f256622
PNA
1799static int fl_hw_create_tmplt(struct tcf_chain *chain,
1800 struct fl_flow_tmplt *tmplt)
34738452
JP
1801{
1802 struct tc_cls_flower_offload cls_flower = {};
1803 struct tcf_block *block = chain->block;
34738452 1804
e3ab786b 1805 cls_flower.rule = flow_rule_alloc(0);
8f256622
PNA
1806 if (!cls_flower.rule)
1807 return -ENOMEM;
1808
34738452
JP
1809 cls_flower.common.chain_index = chain->index;
1810 cls_flower.command = TC_CLSFLOWER_TMPLT_CREATE;
1811 cls_flower.cookie = (unsigned long) tmplt;
8f256622
PNA
1812 cls_flower.rule->match.dissector = &tmplt->dissector;
1813 cls_flower.rule->match.mask = &tmplt->mask;
1814 cls_flower.rule->match.key = &tmplt->dummy_key;
34738452
JP
1815
1816 /* We don't care if any driver fails to handle this call;
1817 * it serves only as a hint to the drivers.
1818 */
aeb3fecd 1819 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
8f256622
PNA
1820 kfree(cls_flower.rule);
1821
1822 return 0;
34738452
JP
1823}
1824
1825static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
1826 struct fl_flow_tmplt *tmplt)
1827{
1828 struct tc_cls_flower_offload cls_flower = {};
1829 struct tcf_block *block = chain->block;
1830
1831 cls_flower.common.chain_index = chain->index;
1832 cls_flower.command = TC_CLSFLOWER_TMPLT_DESTROY;
1833 cls_flower.cookie = (unsigned long) tmplt;
1834
aeb3fecd 1835 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
34738452
JP
1836}
1837
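/* Chain templates constrain the masks that filters added to a chain may
 * use and give drivers an early hint for resource allocation.
 * Illustrative iproute2 usage (hypothetical device and prefix):
 *   tc chain add dev eth0 ingress protocol ip flower dst_ip 192.0.2.0/24
 */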
b95ec7eb
JP
1838static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
1839 struct nlattr **tca,
1840 struct netlink_ext_ack *extack)
1841{
1842 struct fl_flow_tmplt *tmplt;
1843 struct nlattr **tb;
1844 int err;
1845
1846 if (!tca[TCA_OPTIONS])
1847 return ERR_PTR(-EINVAL);
1848
1849 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1850 if (!tb)
1851 return ERR_PTR(-ENOBUFS);
8cb08174
JB
1852 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1853 tca[TCA_OPTIONS], fl_policy, NULL);
b95ec7eb
JP
1854 if (err)
1855 goto errout_tb;
1856
1857 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
1cbc36a5
DC
1858 if (!tmplt) {
1859 err = -ENOMEM;
b95ec7eb 1860 goto errout_tb;
1cbc36a5 1861 }
b95ec7eb
JP
1862 tmplt->chain = chain;
1863 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
1864 if (err)
1865 goto errout_tmplt;
b95ec7eb
JP
1866
1867 fl_init_dissector(&tmplt->dissector, &tmplt->mask);
1868
8f256622
PNA
1869 err = fl_hw_create_tmplt(chain, tmplt);
1870 if (err)
1871 goto errout_tmplt;
34738452 1872
8f256622 1873 kfree(tb);
b95ec7eb
JP
1874 return tmplt;
1875
1876errout_tmplt:
1877 kfree(tmplt);
1878errout_tb:
1879 kfree(tb);
1880 return ERR_PTR(err);
1881}
1882
ec3ed293
VB
1883static void fl_tmplt_destroy(void *tmplt_priv)
1884{
1885 struct fl_flow_tmplt *tmplt = tmplt_priv;
1886
95278dda
CW
1887 fl_hw_destroy_tmplt(tmplt->chain, tmplt);
1888 kfree(tmplt);
ec3ed293
VB
1889}
1890
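/* Emit one key/mask attribute pair. Keys whose mask is all zeroes are not
 * dumped at all, and a mask_type of TCA_FLOWER_UNSPEC suppresses the mask
 * attribute (used for keys that are only matched exactly).
 */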
77b9900e
JP
1891static int fl_dump_key_val(struct sk_buff *skb,
1892 void *val, int val_type,
1893 void *mask, int mask_type, int len)
1894{
1895 int err;
1896
1897 if (!memchr_inv(mask, 0, len))
1898 return 0;
1899 err = nla_put(skb, val_type, len, val);
1900 if (err)
1901 return err;
1902 if (mask_type != TCA_FLOWER_UNSPEC) {
1903 err = nla_put(skb, mask_type, len, mask);
1904 if (err)
1905 return err;
1906 }
1907 return 0;
1908}
1909
5c72299f
AN
1910static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
1911 struct fl_flow_key *mask)
1912{
1913 if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN,
1914 &mask->tp_min.dst, TCA_FLOWER_UNSPEC,
1915 sizeof(key->tp_min.dst)) ||
1916 fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX,
1917 &mask->tp_max.dst, TCA_FLOWER_UNSPEC,
1918 sizeof(key->tp_max.dst)) ||
1919 fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN,
1920 &mask->tp_min.src, TCA_FLOWER_UNSPEC,
1921 sizeof(key->tp_min.src)) ||
1922 fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX,
1923 &mask->tp_max.src, TCA_FLOWER_UNSPEC,
1924 sizeof(key->tp_max.src)))
1925 return -1;
1926
1927 return 0;
1928}
1929
a577d8f7
BL
1930static int fl_dump_key_mpls(struct sk_buff *skb,
1931 struct flow_dissector_key_mpls *mpls_key,
1932 struct flow_dissector_key_mpls *mpls_mask)
1933{
1934 int err;
1935
1936 if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
1937 return 0;
1938 if (mpls_mask->mpls_ttl) {
1939 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
1940 mpls_key->mpls_ttl);
1941 if (err)
1942 return err;
1943 }
1944 if (mpls_mask->mpls_tc) {
1945 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
1946 mpls_key->mpls_tc);
1947 if (err)
1948 return err;
1949 }
1950 if (mpls_mask->mpls_label) {
1951 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
1952 mpls_key->mpls_label);
1953 if (err)
1954 return err;
1955 }
1956 if (mpls_mask->mpls_bos) {
1957 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
1958 mpls_key->mpls_bos);
1959 if (err)
1960 return err;
1961 }
1962 return 0;
1963}
1964
0e2c17b6 1965static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
4d80cc0a
OG
1966 struct flow_dissector_key_ip *key,
1967 struct flow_dissector_key_ip *mask)
1968{
0e2c17b6
OG
1969 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1970 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1971 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1972 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1973
1974 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
1975 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
4d80cc0a
OG
1976 return -1;
1977
1978 return 0;
1979}
1980
9399ae9a 1981static int fl_dump_key_vlan(struct sk_buff *skb,
d64efd09 1982 int vlan_id_key, int vlan_prio_key,
9399ae9a
HHZ
1983 struct flow_dissector_key_vlan *vlan_key,
1984 struct flow_dissector_key_vlan *vlan_mask)
1985{
1986 int err;
1987
1988 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
1989 return 0;
1990 if (vlan_mask->vlan_id) {
d64efd09 1991 err = nla_put_u16(skb, vlan_id_key,
9399ae9a
HHZ
1992 vlan_key->vlan_id);
1993 if (err)
1994 return err;
1995 }
1996 if (vlan_mask->vlan_priority) {
d64efd09 1997 err = nla_put_u8(skb, vlan_prio_key,
9399ae9a
HHZ
1998 vlan_key->vlan_priority);
1999 if (err)
2000 return err;
2001 }
2002 return 0;
2003}
2004
faa3ffce
OG
2005static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2006 u32 *flower_key, u32 *flower_mask,
2007 u32 flower_flag_bit, u32 dissector_flag_bit)
2008{
2009 if (dissector_mask & dissector_flag_bit) {
2010 *flower_mask |= flower_flag_bit;
2011 if (dissector_key & dissector_flag_bit)
2012 *flower_key |= flower_flag_bit;
2013 }
2014}
2015
2016static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2017{
2018 u32 key, mask;
2019 __be32 _key, _mask;
2020 int err;
2021
2022 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2023 return 0;
2024
2025 key = 0;
2026 mask = 0;
2027
2028 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2029 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
459d153d
PJV
2030 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2031 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2032 FLOW_DIS_FIRST_FRAG);
faa3ffce
OG
2033
2034 _key = cpu_to_be32(key);
2035 _mask = cpu_to_be32(mask);
2036
2037 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2038 if (err)
2039 return err;
2040
2041 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2042}
2043
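/* Dump geneve tunnel options as nested TCA_FLOWER_KEY_ENC_OPT_GENEVE_*
 * attributes. enc_opts->data holds the options back to back, with each
 * option's length counted in 4-byte words.
 */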
0a6e7778
PJV
2044static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2045 struct flow_dissector_key_enc_opts *enc_opts)
2046{
2047 struct geneve_opt *opt;
2048 struct nlattr *nest;
2049 int opt_off = 0;
2050
ae0be8de 2051 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
0a6e7778
PJV
2052 if (!nest)
2053 goto nla_put_failure;
2054
2055 while (enc_opts->len > opt_off) {
2056 opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2057
2058 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2059 opt->opt_class))
2060 goto nla_put_failure;
2061 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2062 opt->type))
2063 goto nla_put_failure;
2064 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2065 opt->length * 4, opt->opt_data))
2066 goto nla_put_failure;
2067
2068 opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2069 }
2070 nla_nest_end(skb, nest);
2071 return 0;
2072
2073nla_put_failure:
2074 nla_nest_cancel(skb, nest);
2075 return -EMSGSIZE;
2076}
2077
2078static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2079 struct flow_dissector_key_enc_opts *enc_opts)
2080{
2081 struct nlattr *nest;
2082 int err;
2083
2084 if (!enc_opts->len)
2085 return 0;
2086
ae0be8de 2087 nest = nla_nest_start_noflag(skb, enc_opt_type);
0a6e7778
PJV
2088 if (!nest)
2089 goto nla_put_failure;
2090
2091 switch (enc_opts->dst_opt_type) {
2092 case TUNNEL_GENEVE_OPT:
2093 err = fl_dump_key_geneve_opt(skb, enc_opts);
2094 if (err)
2095 goto nla_put_failure;
2096 break;
2097 default:
2098 goto nla_put_failure;
2099 }
2100 nla_nest_end(skb, nest);
2101 return 0;
2102
2103nla_put_failure:
2104 nla_nest_cancel(skb, nest);
2105 return -EMSGSIZE;
2106}
2107
2108static int fl_dump_key_enc_opt(struct sk_buff *skb,
2109 struct flow_dissector_key_enc_opts *key_opts,
2110 struct flow_dissector_key_enc_opts *msk_opts)
2111{
2112 int err;
2113
2114 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2115 if (err)
2116 return err;
2117
2118 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2119}
2120
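/* Dump the match part of a filter (or template). The helpers above mirror
 * the TCA_FLOWER_* attribute groups accepted on the set path, so a rule
 * installed with, for example (illustrative command):
 *   tc filter add dev eth0 ingress protocol ip flower ip_proto tcp dst_port 80 action drop
 * is shown with the same attributes by "tc filter show dev eth0 ingress".
 */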
f5749081
JP
2121static int fl_dump_key(struct sk_buff *skb, struct net *net,
2122 struct fl_flow_key *key, struct fl_flow_key *mask)
77b9900e 2123{
77b9900e
JP
2124 if (mask->indev_ifindex) {
2125 struct net_device *dev;
2126
2127 dev = __dev_get_by_index(net, key->indev_ifindex);
2128 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2129 goto nla_put_failure;
2130 }
2131
2132 if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2133 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2134 sizeof(key->eth.dst)) ||
2135 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2136 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2137 sizeof(key->eth.src)) ||
2138 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2139 &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2140 sizeof(key->basic.n_proto)))
2141 goto nla_put_failure;
9399ae9a 2142
a577d8f7
BL
2143 if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2144 goto nla_put_failure;
2145
d64efd09
JL
2146 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2147 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
9399ae9a
HHZ
2148 goto nla_put_failure;
2149
d64efd09
JL
2150 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2151 TCA_FLOWER_KEY_CVLAN_PRIO,
2152 &key->cvlan, &mask->cvlan) ||
2153 (mask->cvlan.vlan_tpid &&
158abbf1
JL
2154 nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2155 key->cvlan.vlan_tpid)))
d3069512
JL
2156 goto nla_put_failure;
2157
5e9a0fe4
JL
2158 if (mask->basic.n_proto) {
2159 if (mask->cvlan.vlan_tpid) {
2160 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2161 key->basic.n_proto))
2162 goto nla_put_failure;
2163 } else if (mask->vlan.vlan_tpid) {
2164 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2165 key->basic.n_proto))
2166 goto nla_put_failure;
2167 }
d64efd09
JL
2168 }
2169
77b9900e
JP
2170 if ((key->basic.n_proto == htons(ETH_P_IP) ||
2171 key->basic.n_proto == htons(ETH_P_IPV6)) &&
4d80cc0a 2172 (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
77b9900e 2173 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
4d80cc0a 2174 sizeof(key->basic.ip_proto)) ||
0e2c17b6 2175 fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
77b9900e
JP
2176 goto nla_put_failure;
2177
c3f83241 2178 if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
77b9900e
JP
2179 (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2180 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2181 sizeof(key->ipv4.src)) ||
2182 fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2183 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2184 sizeof(key->ipv4.dst))))
2185 goto nla_put_failure;
c3f83241 2186 else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
77b9900e
JP
2187 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2188 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2189 sizeof(key->ipv6.src)) ||
2190 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2191 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2192 sizeof(key->ipv6.dst))))
2193 goto nla_put_failure;
2194
2195 if (key->basic.ip_proto == IPPROTO_TCP &&
2196 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
aa72d708 2197 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
77b9900e
JP
2198 sizeof(key->tp.src)) ||
2199 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
aa72d708 2200 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
fdfc7dd6
JP
2201 sizeof(key->tp.dst)) ||
2202 fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2203 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2204 sizeof(key->tcp.flags))))
77b9900e
JP
2205 goto nla_put_failure;
2206 else if (key->basic.ip_proto == IPPROTO_UDP &&
2207 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
aa72d708 2208 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
77b9900e
JP
2209 sizeof(key->tp.src)) ||
2210 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
aa72d708 2211 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
5976c5f4
SH
2212 sizeof(key->tp.dst))))
2213 goto nla_put_failure;
2214 else if (key->basic.ip_proto == IPPROTO_SCTP &&
2215 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2216 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2217 sizeof(key->tp.src)) ||
2218 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2219 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
77b9900e
JP
2220 sizeof(key->tp.dst))))
2221 goto nla_put_failure;
7b684884
SH
2222 else if (key->basic.n_proto == htons(ETH_P_IP) &&
2223 key->basic.ip_proto == IPPROTO_ICMP &&
2224 (fl_dump_key_val(skb, &key->icmp.type,
2225 TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2226 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2227 sizeof(key->icmp.type)) ||
2228 fl_dump_key_val(skb, &key->icmp.code,
2229 TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2230 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2231 sizeof(key->icmp.code))))
2232 goto nla_put_failure;
2233 else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2234 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2235 (fl_dump_key_val(skb, &key->icmp.type,
2236 TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2237 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2238 sizeof(key->icmp.type)) ||
2239 fl_dump_key_val(skb, &key->icmp.code,
2240 TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2241 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2242 sizeof(key->icmp.code))))
2243 goto nla_put_failure;
99d31326
SH
2244 else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2245 key->basic.n_proto == htons(ETH_P_RARP)) &&
2246 (fl_dump_key_val(skb, &key->arp.sip,
2247 TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2248 TCA_FLOWER_KEY_ARP_SIP_MASK,
2249 sizeof(key->arp.sip)) ||
2250 fl_dump_key_val(skb, &key->arp.tip,
2251 TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2252 TCA_FLOWER_KEY_ARP_TIP_MASK,
2253 sizeof(key->arp.tip)) ||
2254 fl_dump_key_val(skb, &key->arp.op,
2255 TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
2256 TCA_FLOWER_KEY_ARP_OP_MASK,
2257 sizeof(key->arp.op)) ||
2258 fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2259 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2260 sizeof(key->arp.sha)) ||
2261 fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2262 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2263 sizeof(key->arp.tha))))
2264 goto nla_put_failure;
77b9900e 2265
5c72299f
AN
2266 if ((key->basic.ip_proto == IPPROTO_TCP ||
2267 key->basic.ip_proto == IPPROTO_UDP ||
2268 key->basic.ip_proto == IPPROTO_SCTP) &&
2269 fl_dump_key_port_range(skb, key, mask))
2270 goto nla_put_failure;
2271
bc3103f1
AV
2272 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2273 (fl_dump_key_val(skb, &key->enc_ipv4.src,
2274 TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
2275 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
2276 sizeof(key->enc_ipv4.src)) ||
2277 fl_dump_key_val(skb, &key->enc_ipv4.dst,
2278 TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
2279 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
2280 sizeof(key->enc_ipv4.dst))))
2281 goto nla_put_failure;
2282 else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2283 (fl_dump_key_val(skb, &key->enc_ipv6.src,
2284 TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
2285 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
2286 sizeof(key->enc_ipv6.src)) ||
2287 fl_dump_key_val(skb, &key->enc_ipv6.dst,
2288 TCA_FLOWER_KEY_ENC_IPV6_DST,
2289 &mask->enc_ipv6.dst,
2290 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
2291 sizeof(key->enc_ipv6.dst))))
2292 goto nla_put_failure;
2293
2294 if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
eb523f42 2295 &mask->enc_key_id, TCA_FLOWER_UNSPEC,
f4d997fd
HHZ
2296 sizeof(key->enc_key_id)) ||
2297 fl_dump_key_val(skb, &key->enc_tp.src,
2298 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
2299 &mask->enc_tp.src,
2300 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
2301 sizeof(key->enc_tp.src)) ||
2302 fl_dump_key_val(skb, &key->enc_tp.dst,
2303 TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
2304 &mask->enc_tp.dst,
2305 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
0e2c17b6 2306 sizeof(key->enc_tp.dst)) ||
0a6e7778
PJV
2307 fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
2308 fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
bc3103f1
AV
2309 goto nla_put_failure;
2310
faa3ffce
OG
2311 if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
2312 goto nla_put_failure;
2313
f5749081
JP
2314 return 0;
2315
2316nla_put_failure:
2317 return -EMSGSIZE;
2318}
2319
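/* Dump a single filter. tp->lock is held only while the classid, key and
 * flags are snapshotted; it is released before the hw stats update and
 * the exts dump.
 */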
2320static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
12db03b6 2321 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
f5749081
JP
2322{
2323 struct cls_fl_filter *f = fh;
2324 struct nlattr *nest;
2325 struct fl_flow_key *key, *mask;
3d81e711 2326 bool skip_hw;
f5749081
JP
2327
2328 if (!f)
2329 return skb->len;
2330
2331 t->tcm_handle = f->handle;
2332
ae0be8de 2333 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
f5749081
JP
2334 if (!nest)
2335 goto nla_put_failure;
2336
3d81e711
VB
2337 spin_lock(&tp->lock);
2338
f5749081
JP
2339 if (f->res.classid &&
2340 nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3d81e711 2341 goto nla_put_failure_locked;
f5749081
JP
2342
2343 key = &f->key;
2344 mask = &f->mask->key;
3d81e711 2345 skip_hw = tc_skip_hw(f->flags);
f5749081
JP
2346
2347 if (fl_dump_key(skb, net, key, mask))
3d81e711 2348 goto nla_put_failure_locked;
f5749081 2349
749e6720 2350 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3d81e711
VB
2351 goto nla_put_failure_locked;
2352
2353 spin_unlock(&tp->lock);
2354
2355 if (!skip_hw)
c24e43d8 2356 fl_hw_update_stats(tp, f, rtnl_held);
e69985c6 2357
86c55361
VB
2358 if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
2359 goto nla_put_failure;
2360
77b9900e
JP
2361 if (tcf_exts_dump(skb, &f->exts))
2362 goto nla_put_failure;
2363
2364 nla_nest_end(skb, nest);
2365
2366 if (tcf_exts_dump_stats(skb, &f->exts) < 0)
2367 goto nla_put_failure;
2368
2369 return skb->len;
2370
3d81e711
VB
2371nla_put_failure_locked:
2372 spin_unlock(&tp->lock);
77b9900e
JP
2373nla_put_failure:
2374 nla_nest_cancel(skb, nest);
2375 return -1;
2376}
2377
b95ec7eb
JP
2378static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
2379{
2380 struct fl_flow_tmplt *tmplt = tmplt_priv;
2381 struct fl_flow_key *key, *mask;
2382 struct nlattr *nest;
2383
ae0be8de 2384 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
b95ec7eb
JP
2385 if (!nest)
2386 goto nla_put_failure;
2387
2388 key = &tmplt->dummy_key;
2389 mask = &tmplt->mask;
2390
2391 if (fl_dump_key(skb, net, key, mask))
2392 goto nla_put_failure;
2393
2394 nla_nest_end(skb, nest);
2395
2396 return skb->len;
2397
2398nla_put_failure:
2399 nla_nest_cancel(skb, nest);
2400 return -EMSGSIZE;
2401}
2402
07d79fc7
CW
2403static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
2404{
2405 struct cls_fl_filter *f = fh;
2406
2407 if (f && f->res.classid == classid)
2408 f->res.class = cl;
2409}
2410
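/* The flower ops are registered with TCF_PROTO_OPS_DOIT_UNLOCKED, so the
 * callbacks above may run without the rtnl lock (rtnl_held tells them
 * whether it is taken) and rely on tp->lock, RCU and per-filter refcounts
 * for consistency.
 */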
77b9900e
JP
2411static struct tcf_proto_ops cls_fl_ops __read_mostly = {
2412 .kind = "flower",
2413 .classify = fl_classify,
2414 .init = fl_init,
2415 .destroy = fl_destroy,
2416 .get = fl_get,
06177558 2417 .put = fl_put,
77b9900e
JP
2418 .change = fl_change,
2419 .delete = fl_delete,
2420 .walk = fl_walk,
31533cba 2421 .reoffload = fl_reoffload,
77b9900e 2422 .dump = fl_dump,
07d79fc7 2423 .bind_class = fl_bind_class,
b95ec7eb
JP
2424 .tmplt_create = fl_tmplt_create,
2425 .tmplt_destroy = fl_tmplt_destroy,
2426 .tmplt_dump = fl_tmplt_dump,
77b9900e 2427 .owner = THIS_MODULE,
92149190 2428 .flags = TCF_PROTO_OPS_DOIT_UNLOCKED,
77b9900e
JP
2429};
2430
2431static int __init cls_fl_init(void)
2432{
2433 return register_tcf_proto_ops(&cls_fl_ops);
2434}
2435
2436static void __exit cls_fl_exit(void)
2437{
2438 unregister_tcf_proto_ops(&cls_fl_ops);
2439}
2440
2441module_init(cls_fl_init);
2442module_exit(cls_fl_exit);
2443
2444MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
2445MODULE_DESCRIPTION("Flower classifier");
2446MODULE_LICENSE("GPL v2");