1 // SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/rhashtable.h>
12 #include <linux/workqueue.h>
13 #include <linux/refcount.h>
15 #include <linux/if_ether.h>
16 #include <linux/in6.h>
18 #include <linux/mpls.h>
20 #include <net/sch_generic.h>
21 #include <net/pkt_cls.h>
23 #include <net/flow_dissector.h>
24 #include <net/geneve.h>
27 #include <net/dst_metadata.h>
29 #include <uapi/linux/netfilter/nf_conntrack_common.h>
32 struct flow_dissector_key_meta meta
;
33 struct flow_dissector_key_control control
;
34 struct flow_dissector_key_control enc_control
;
35 struct flow_dissector_key_basic basic
;
36 struct flow_dissector_key_eth_addrs eth
;
37 struct flow_dissector_key_vlan vlan
;
38 struct flow_dissector_key_vlan cvlan
;
40 struct flow_dissector_key_ipv4_addrs ipv4
;
41 struct flow_dissector_key_ipv6_addrs ipv6
;
43 struct flow_dissector_key_ports tp
;
44 struct flow_dissector_key_icmp icmp
;
45 struct flow_dissector_key_arp arp
;
46 struct flow_dissector_key_keyid enc_key_id
;
48 struct flow_dissector_key_ipv4_addrs enc_ipv4
;
49 struct flow_dissector_key_ipv6_addrs enc_ipv6
;
51 struct flow_dissector_key_ports enc_tp
;
52 struct flow_dissector_key_mpls mpls
;
53 struct flow_dissector_key_tcp tcp
;
54 struct flow_dissector_key_ip ip
;
55 struct flow_dissector_key_ip enc_ip
;
56 struct flow_dissector_key_enc_opts enc_opts
;
57 struct flow_dissector_key_ports tp_min
;
58 struct flow_dissector_key_ports tp_max
;
59 struct flow_dissector_key_ct ct
;
60 } __aligned(BITS_PER_LONG
/ 8); /* Ensure that we can do comparisons as longs. */
62 struct fl_flow_mask_range
{
63 unsigned short int start
;
64 unsigned short int end
;
68 struct fl_flow_key key
;
69 struct fl_flow_mask_range range
;
71 struct rhash_head ht_node
;
73 struct rhashtable_params filter_ht_params
;
74 struct flow_dissector dissector
;
75 struct list_head filters
;
76 struct rcu_work rwork
;
77 struct list_head list
;
81 struct fl_flow_tmplt
{
82 struct fl_flow_key dummy_key
;
83 struct fl_flow_key mask
;
84 struct flow_dissector dissector
;
85 struct tcf_chain
*chain
;
90 spinlock_t masks_lock
; /* Protect masks list */
91 struct list_head masks
;
92 struct list_head hw_filters
;
93 struct rcu_work rwork
;
94 struct idr handle_idr
;
97 struct cls_fl_filter
{
98 struct fl_flow_mask
*mask
;
99 struct rhash_head ht_node
;
100 struct fl_flow_key mkey
;
101 struct tcf_exts exts
;
102 struct tcf_result res
;
103 struct fl_flow_key key
;
104 struct list_head list
;
105 struct list_head hw_list
;
109 struct rcu_work rwork
;
110 struct net_device
*hw_dev
;
111 /* Flower classifier is unlocked, which means that its reference counter
112 * can be changed concurrently without any kind of external
113 * synchronization. Use atomic reference counter to be concurrency-safe.
119 static const struct rhashtable_params mask_ht_params
= {
120 .key_offset
= offsetof(struct fl_flow_mask
, key
),
121 .key_len
= sizeof(struct fl_flow_key
),
122 .head_offset
= offsetof(struct fl_flow_mask
, ht_node
),
123 .automatic_shrinking
= true,
126 static unsigned short int fl_mask_range(const struct fl_flow_mask
*mask
)
128 return mask
->range
.end
- mask
->range
.start
;
131 static void fl_mask_update_range(struct fl_flow_mask
*mask
)
133 const u8
*bytes
= (const u8
*) &mask
->key
;
134 size_t size
= sizeof(mask
->key
);
135 size_t i
, first
= 0, last
;
137 for (i
= 0; i
< size
; i
++) {
144 for (i
= size
- 1; i
!= first
; i
--) {
150 mask
->range
.start
= rounddown(first
, sizeof(long));
151 mask
->range
.end
= roundup(last
+ 1, sizeof(long));
154 static void *fl_key_get_start(struct fl_flow_key
*key
,
155 const struct fl_flow_mask
*mask
)
157 return (u8
*) key
+ mask
->range
.start
;
160 static void fl_set_masked_key(struct fl_flow_key
*mkey
, struct fl_flow_key
*key
,
161 struct fl_flow_mask
*mask
)
163 const long *lkey
= fl_key_get_start(key
, mask
);
164 const long *lmask
= fl_key_get_start(&mask
->key
, mask
);
165 long *lmkey
= fl_key_get_start(mkey
, mask
);
168 for (i
= 0; i
< fl_mask_range(mask
); i
+= sizeof(long))
169 *lmkey
++ = *lkey
++ & *lmask
++;
172 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt
*tmplt
,
173 struct fl_flow_mask
*mask
)
175 const long *lmask
= fl_key_get_start(&mask
->key
, mask
);
181 ltmplt
= fl_key_get_start(&tmplt
->mask
, mask
);
182 for (i
= 0; i
< fl_mask_range(mask
); i
+= sizeof(long)) {
183 if (~*ltmplt
++ & *lmask
++)
189 static void fl_clear_masked_range(struct fl_flow_key
*key
,
190 struct fl_flow_mask
*mask
)
192 memset(fl_key_get_start(key
, mask
), 0, fl_mask_range(mask
));
195 static bool fl_range_port_dst_cmp(struct cls_fl_filter
*filter
,
196 struct fl_flow_key
*key
,
197 struct fl_flow_key
*mkey
)
199 __be16 min_mask
, max_mask
, min_val
, max_val
;
201 min_mask
= htons(filter
->mask
->key
.tp_min
.dst
);
202 max_mask
= htons(filter
->mask
->key
.tp_max
.dst
);
203 min_val
= htons(filter
->key
.tp_min
.dst
);
204 max_val
= htons(filter
->key
.tp_max
.dst
);
206 if (min_mask
&& max_mask
) {
207 if (htons(key
->tp
.dst
) < min_val
||
208 htons(key
->tp
.dst
) > max_val
)
211 /* skb does not have min and max values */
212 mkey
->tp_min
.dst
= filter
->mkey
.tp_min
.dst
;
213 mkey
->tp_max
.dst
= filter
->mkey
.tp_max
.dst
;
218 static bool fl_range_port_src_cmp(struct cls_fl_filter
*filter
,
219 struct fl_flow_key
*key
,
220 struct fl_flow_key
*mkey
)
222 __be16 min_mask
, max_mask
, min_val
, max_val
;
224 min_mask
= htons(filter
->mask
->key
.tp_min
.src
);
225 max_mask
= htons(filter
->mask
->key
.tp_max
.src
);
226 min_val
= htons(filter
->key
.tp_min
.src
);
227 max_val
= htons(filter
->key
.tp_max
.src
);
229 if (min_mask
&& max_mask
) {
230 if (htons(key
->tp
.src
) < min_val
||
231 htons(key
->tp
.src
) > max_val
)
234 /* skb does not have min and max values */
235 mkey
->tp_min
.src
= filter
->mkey
.tp_min
.src
;
236 mkey
->tp_max
.src
= filter
->mkey
.tp_max
.src
;
241 static struct cls_fl_filter
*__fl_lookup(struct fl_flow_mask
*mask
,
242 struct fl_flow_key
*mkey
)
244 return rhashtable_lookup_fast(&mask
->ht
, fl_key_get_start(mkey
, mask
),
245 mask
->filter_ht_params
);
248 static struct cls_fl_filter
*fl_lookup_range(struct fl_flow_mask
*mask
,
249 struct fl_flow_key
*mkey
,
250 struct fl_flow_key
*key
)
252 struct cls_fl_filter
*filter
, *f
;
254 list_for_each_entry_rcu(filter
, &mask
->filters
, list
) {
255 if (!fl_range_port_dst_cmp(filter
, key
, mkey
))
258 if (!fl_range_port_src_cmp(filter
, key
, mkey
))
261 f
= __fl_lookup(mask
, mkey
);
268 static struct cls_fl_filter
*fl_lookup(struct fl_flow_mask
*mask
,
269 struct fl_flow_key
*mkey
,
270 struct fl_flow_key
*key
)
272 if ((mask
->flags
& TCA_FLOWER_MASK_FLAGS_RANGE
))
273 return fl_lookup_range(mask
, mkey
, key
);
275 return __fl_lookup(mask
, mkey
);
278 static u16 fl_ct_info_to_flower_map
[] = {
279 [IP_CT_ESTABLISHED
] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED
|
280 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED
,
281 [IP_CT_RELATED
] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED
|
282 TCA_FLOWER_KEY_CT_FLAGS_RELATED
,
283 [IP_CT_ESTABLISHED_REPLY
] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED
|
284 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED
,
285 [IP_CT_RELATED_REPLY
] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED
|
286 TCA_FLOWER_KEY_CT_FLAGS_RELATED
,
287 [IP_CT_NEW
] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED
|
288 TCA_FLOWER_KEY_CT_FLAGS_NEW
,
291 static int fl_classify(struct sk_buff
*skb
, const struct tcf_proto
*tp
,
292 struct tcf_result
*res
)
294 struct cls_fl_head
*head
= rcu_dereference_bh(tp
->root
);
295 struct fl_flow_key skb_mkey
;
296 struct fl_flow_key skb_key
;
297 struct fl_flow_mask
*mask
;
298 struct cls_fl_filter
*f
;
300 list_for_each_entry_rcu(mask
, &head
->masks
, list
) {
301 fl_clear_masked_range(&skb_key
, mask
);
303 skb_flow_dissect_meta(skb
, &mask
->dissector
, &skb_key
);
304 /* skb_flow_dissect() does not set n_proto in case an unknown
305 * protocol, so do it rather here.
307 skb_key
.basic
.n_proto
= skb
->protocol
;
308 skb_flow_dissect_tunnel_info(skb
, &mask
->dissector
, &skb_key
);
309 skb_flow_dissect_ct(skb
, &mask
->dissector
, &skb_key
,
310 fl_ct_info_to_flower_map
,
311 ARRAY_SIZE(fl_ct_info_to_flower_map
));
312 skb_flow_dissect(skb
, &mask
->dissector
, &skb_key
, 0);
314 fl_set_masked_key(&skb_mkey
, &skb_key
, mask
);
316 f
= fl_lookup(mask
, &skb_mkey
, &skb_key
);
317 if (f
&& !tc_skip_sw(f
->flags
)) {
319 return tcf_exts_exec(skb
, &f
->exts
, res
);
325 static int fl_init(struct tcf_proto
*tp
)
327 struct cls_fl_head
*head
;
329 head
= kzalloc(sizeof(*head
), GFP_KERNEL
);
333 spin_lock_init(&head
->masks_lock
);
334 INIT_LIST_HEAD_RCU(&head
->masks
);
335 INIT_LIST_HEAD(&head
->hw_filters
);
336 rcu_assign_pointer(tp
->root
, head
);
337 idr_init(&head
->handle_idr
);
339 return rhashtable_init(&head
->ht
, &mask_ht_params
);
342 static void fl_mask_free(struct fl_flow_mask
*mask
, bool mask_init_done
)
344 /* temporary masks don't have their filters list and ht initialized */
345 if (mask_init_done
) {
346 WARN_ON(!list_empty(&mask
->filters
));
347 rhashtable_destroy(&mask
->ht
);
352 static void fl_mask_free_work(struct work_struct
*work
)
354 struct fl_flow_mask
*mask
= container_of(to_rcu_work(work
),
355 struct fl_flow_mask
, rwork
);
357 fl_mask_free(mask
, true);
360 static void fl_uninit_mask_free_work(struct work_struct
*work
)
362 struct fl_flow_mask
*mask
= container_of(to_rcu_work(work
),
363 struct fl_flow_mask
, rwork
);
365 fl_mask_free(mask
, false);
368 static bool fl_mask_put(struct cls_fl_head
*head
, struct fl_flow_mask
*mask
)
370 if (!refcount_dec_and_test(&mask
->refcnt
))
373 rhashtable_remove_fast(&head
->ht
, &mask
->ht_node
, mask_ht_params
);
375 spin_lock(&head
->masks_lock
);
376 list_del_rcu(&mask
->list
);
377 spin_unlock(&head
->masks_lock
);
379 tcf_queue_work(&mask
->rwork
, fl_mask_free_work
);
384 static struct cls_fl_head
*fl_head_dereference(struct tcf_proto
*tp
)
386 /* Flower classifier only changes root pointer during init and destroy.
387 * Users must obtain reference to tcf_proto instance before calling its
388 * API, so tp->root pointer is protected from concurrent call to
389 * fl_destroy() by reference counting.
391 return rcu_dereference_raw(tp
->root
);
394 static void __fl_destroy_filter(struct cls_fl_filter
*f
)
396 tcf_exts_destroy(&f
->exts
);
397 tcf_exts_put_net(&f
->exts
);
401 static void fl_destroy_filter_work(struct work_struct
*work
)
403 struct cls_fl_filter
*f
= container_of(to_rcu_work(work
),
404 struct cls_fl_filter
, rwork
);
406 __fl_destroy_filter(f
);
409 static void fl_hw_destroy_filter(struct tcf_proto
*tp
, struct cls_fl_filter
*f
,
410 bool rtnl_held
, struct netlink_ext_ack
*extack
)
412 struct tcf_block
*block
= tp
->chain
->block
;
413 struct flow_cls_offload cls_flower
= {};
415 tc_cls_common_offload_init(&cls_flower
.common
, tp
, f
->flags
, extack
);
416 cls_flower
.command
= FLOW_CLS_DESTROY
;
417 cls_flower
.cookie
= (unsigned long) f
;
419 tc_setup_cb_destroy(block
, tp
, TC_SETUP_CLSFLOWER
, &cls_flower
, false,
420 &f
->flags
, &f
->in_hw_count
, rtnl_held
);
424 static int fl_hw_replace_filter(struct tcf_proto
*tp
,
425 struct cls_fl_filter
*f
, bool rtnl_held
,
426 struct netlink_ext_ack
*extack
)
428 struct tcf_block
*block
= tp
->chain
->block
;
429 struct flow_cls_offload cls_flower
= {};
430 bool skip_sw
= tc_skip_sw(f
->flags
);
433 cls_flower
.rule
= flow_rule_alloc(tcf_exts_num_actions(&f
->exts
));
434 if (!cls_flower
.rule
)
437 tc_cls_common_offload_init(&cls_flower
.common
, tp
, f
->flags
, extack
);
438 cls_flower
.command
= FLOW_CLS_REPLACE
;
439 cls_flower
.cookie
= (unsigned long) f
;
440 cls_flower
.rule
->match
.dissector
= &f
->mask
->dissector
;
441 cls_flower
.rule
->match
.mask
= &f
->mask
->key
;
442 cls_flower
.rule
->match
.key
= &f
->mkey
;
443 cls_flower
.classid
= f
->res
.classid
;
445 err
= tc_setup_flow_action(&cls_flower
.rule
->action
, &f
->exts
,
448 kfree(cls_flower
.rule
);
450 NL_SET_ERR_MSG_MOD(extack
, "Failed to setup flow action");
456 err
= tc_setup_cb_add(block
, tp
, TC_SETUP_CLSFLOWER
, &cls_flower
,
457 skip_sw
, &f
->flags
, &f
->in_hw_count
, rtnl_held
);
458 tc_cleanup_flow_action(&cls_flower
.rule
->action
);
459 kfree(cls_flower
.rule
);
462 fl_hw_destroy_filter(tp
, f
, rtnl_held
, NULL
);
466 if (skip_sw
&& !(f
->flags
& TCA_CLS_FLAGS_IN_HW
))
472 static void fl_hw_update_stats(struct tcf_proto
*tp
, struct cls_fl_filter
*f
,
475 struct tcf_block
*block
= tp
->chain
->block
;
476 struct flow_cls_offload cls_flower
= {};
478 tc_cls_common_offload_init(&cls_flower
.common
, tp
, f
->flags
, NULL
);
479 cls_flower
.command
= FLOW_CLS_STATS
;
480 cls_flower
.cookie
= (unsigned long) f
;
481 cls_flower
.classid
= f
->res
.classid
;
483 tc_setup_cb_call(block
, TC_SETUP_CLSFLOWER
, &cls_flower
, false,
486 tcf_exts_stats_update(&f
->exts
, cls_flower
.stats
.bytes
,
487 cls_flower
.stats
.pkts
,
488 cls_flower
.stats
.lastused
);
491 static void __fl_put(struct cls_fl_filter
*f
)
493 if (!refcount_dec_and_test(&f
->refcnt
))
496 if (tcf_exts_get_net(&f
->exts
))
497 tcf_queue_work(&f
->rwork
, fl_destroy_filter_work
);
499 __fl_destroy_filter(f
);
502 static struct cls_fl_filter
*__fl_get(struct cls_fl_head
*head
, u32 handle
)
504 struct cls_fl_filter
*f
;
507 f
= idr_find(&head
->handle_idr
, handle
);
508 if (f
&& !refcount_inc_not_zero(&f
->refcnt
))
515 static int __fl_delete(struct tcf_proto
*tp
, struct cls_fl_filter
*f
,
516 bool *last
, bool rtnl_held
,
517 struct netlink_ext_ack
*extack
)
519 struct cls_fl_head
*head
= fl_head_dereference(tp
);
523 spin_lock(&tp
->lock
);
525 spin_unlock(&tp
->lock
);
530 rhashtable_remove_fast(&f
->mask
->ht
, &f
->ht_node
,
531 f
->mask
->filter_ht_params
);
532 idr_remove(&head
->handle_idr
, f
->handle
);
533 list_del_rcu(&f
->list
);
534 spin_unlock(&tp
->lock
);
536 *last
= fl_mask_put(head
, f
->mask
);
537 if (!tc_skip_hw(f
->flags
))
538 fl_hw_destroy_filter(tp
, f
, rtnl_held
, extack
);
539 tcf_unbind_filter(tp
, &f
->res
);
545 static void fl_destroy_sleepable(struct work_struct
*work
)
547 struct cls_fl_head
*head
= container_of(to_rcu_work(work
),
551 rhashtable_destroy(&head
->ht
);
553 module_put(THIS_MODULE
);
556 static void fl_destroy(struct tcf_proto
*tp
, bool rtnl_held
,
557 struct netlink_ext_ack
*extack
)
559 struct cls_fl_head
*head
= fl_head_dereference(tp
);
560 struct fl_flow_mask
*mask
, *next_mask
;
561 struct cls_fl_filter
*f
, *next
;
564 list_for_each_entry_safe(mask
, next_mask
, &head
->masks
, list
) {
565 list_for_each_entry_safe(f
, next
, &mask
->filters
, list
) {
566 __fl_delete(tp
, f
, &last
, rtnl_held
, extack
);
571 idr_destroy(&head
->handle_idr
);
573 __module_get(THIS_MODULE
);
574 tcf_queue_work(&head
->rwork
, fl_destroy_sleepable
);
577 static void fl_put(struct tcf_proto
*tp
, void *arg
)
579 struct cls_fl_filter
*f
= arg
;
584 static void *fl_get(struct tcf_proto
*tp
, u32 handle
)
586 struct cls_fl_head
*head
= fl_head_dereference(tp
);
588 return __fl_get(head
, handle
);
591 static const struct nla_policy fl_policy
[TCA_FLOWER_MAX
+ 1] = {
592 [TCA_FLOWER_UNSPEC
] = { .type
= NLA_UNSPEC
},
593 [TCA_FLOWER_CLASSID
] = { .type
= NLA_U32
},
594 [TCA_FLOWER_INDEV
] = { .type
= NLA_STRING
,
596 [TCA_FLOWER_KEY_ETH_DST
] = { .len
= ETH_ALEN
},
597 [TCA_FLOWER_KEY_ETH_DST_MASK
] = { .len
= ETH_ALEN
},
598 [TCA_FLOWER_KEY_ETH_SRC
] = { .len
= ETH_ALEN
},
599 [TCA_FLOWER_KEY_ETH_SRC_MASK
] = { .len
= ETH_ALEN
},
600 [TCA_FLOWER_KEY_ETH_TYPE
] = { .type
= NLA_U16
},
601 [TCA_FLOWER_KEY_IP_PROTO
] = { .type
= NLA_U8
},
602 [TCA_FLOWER_KEY_IPV4_SRC
] = { .type
= NLA_U32
},
603 [TCA_FLOWER_KEY_IPV4_SRC_MASK
] = { .type
= NLA_U32
},
604 [TCA_FLOWER_KEY_IPV4_DST
] = { .type
= NLA_U32
},
605 [TCA_FLOWER_KEY_IPV4_DST_MASK
] = { .type
= NLA_U32
},
606 [TCA_FLOWER_KEY_IPV6_SRC
] = { .len
= sizeof(struct in6_addr
) },
607 [TCA_FLOWER_KEY_IPV6_SRC_MASK
] = { .len
= sizeof(struct in6_addr
) },
608 [TCA_FLOWER_KEY_IPV6_DST
] = { .len
= sizeof(struct in6_addr
) },
609 [TCA_FLOWER_KEY_IPV6_DST_MASK
] = { .len
= sizeof(struct in6_addr
) },
610 [TCA_FLOWER_KEY_TCP_SRC
] = { .type
= NLA_U16
},
611 [TCA_FLOWER_KEY_TCP_DST
] = { .type
= NLA_U16
},
612 [TCA_FLOWER_KEY_UDP_SRC
] = { .type
= NLA_U16
},
613 [TCA_FLOWER_KEY_UDP_DST
] = { .type
= NLA_U16
},
614 [TCA_FLOWER_KEY_VLAN_ID
] = { .type
= NLA_U16
},
615 [TCA_FLOWER_KEY_VLAN_PRIO
] = { .type
= NLA_U8
},
616 [TCA_FLOWER_KEY_VLAN_ETH_TYPE
] = { .type
= NLA_U16
},
617 [TCA_FLOWER_KEY_ENC_KEY_ID
] = { .type
= NLA_U32
},
618 [TCA_FLOWER_KEY_ENC_IPV4_SRC
] = { .type
= NLA_U32
},
619 [TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK
] = { .type
= NLA_U32
},
620 [TCA_FLOWER_KEY_ENC_IPV4_DST
] = { .type
= NLA_U32
},
621 [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK
] = { .type
= NLA_U32
},
622 [TCA_FLOWER_KEY_ENC_IPV6_SRC
] = { .len
= sizeof(struct in6_addr
) },
623 [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK
] = { .len
= sizeof(struct in6_addr
) },
624 [TCA_FLOWER_KEY_ENC_IPV6_DST
] = { .len
= sizeof(struct in6_addr
) },
625 [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK
] = { .len
= sizeof(struct in6_addr
) },
626 [TCA_FLOWER_KEY_TCP_SRC_MASK
] = { .type
= NLA_U16
},
627 [TCA_FLOWER_KEY_TCP_DST_MASK
] = { .type
= NLA_U16
},
628 [TCA_FLOWER_KEY_UDP_SRC_MASK
] = { .type
= NLA_U16
},
629 [TCA_FLOWER_KEY_UDP_DST_MASK
] = { .type
= NLA_U16
},
630 [TCA_FLOWER_KEY_SCTP_SRC_MASK
] = { .type
= NLA_U16
},
631 [TCA_FLOWER_KEY_SCTP_DST_MASK
] = { .type
= NLA_U16
},
632 [TCA_FLOWER_KEY_SCTP_SRC
] = { .type
= NLA_U16
},
633 [TCA_FLOWER_KEY_SCTP_DST
] = { .type
= NLA_U16
},
634 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT
] = { .type
= NLA_U16
},
635 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK
] = { .type
= NLA_U16
},
636 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT
] = { .type
= NLA_U16
},
637 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK
] = { .type
= NLA_U16
},
638 [TCA_FLOWER_KEY_FLAGS
] = { .type
= NLA_U32
},
639 [TCA_FLOWER_KEY_FLAGS_MASK
] = { .type
= NLA_U32
},
640 [TCA_FLOWER_KEY_ICMPV4_TYPE
] = { .type
= NLA_U8
},
641 [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK
] = { .type
= NLA_U8
},
642 [TCA_FLOWER_KEY_ICMPV4_CODE
] = { .type
= NLA_U8
},
643 [TCA_FLOWER_KEY_ICMPV4_CODE_MASK
] = { .type
= NLA_U8
},
644 [TCA_FLOWER_KEY_ICMPV6_TYPE
] = { .type
= NLA_U8
},
645 [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK
] = { .type
= NLA_U8
},
646 [TCA_FLOWER_KEY_ICMPV6_CODE
] = { .type
= NLA_U8
},
647 [TCA_FLOWER_KEY_ICMPV6_CODE_MASK
] = { .type
= NLA_U8
},
648 [TCA_FLOWER_KEY_ARP_SIP
] = { .type
= NLA_U32
},
649 [TCA_FLOWER_KEY_ARP_SIP_MASK
] = { .type
= NLA_U32
},
650 [TCA_FLOWER_KEY_ARP_TIP
] = { .type
= NLA_U32
},
651 [TCA_FLOWER_KEY_ARP_TIP_MASK
] = { .type
= NLA_U32
},
652 [TCA_FLOWER_KEY_ARP_OP
] = { .type
= NLA_U8
},
653 [TCA_FLOWER_KEY_ARP_OP_MASK
] = { .type
= NLA_U8
},
654 [TCA_FLOWER_KEY_ARP_SHA
] = { .len
= ETH_ALEN
},
655 [TCA_FLOWER_KEY_ARP_SHA_MASK
] = { .len
= ETH_ALEN
},
656 [TCA_FLOWER_KEY_ARP_THA
] = { .len
= ETH_ALEN
},
657 [TCA_FLOWER_KEY_ARP_THA_MASK
] = { .len
= ETH_ALEN
},
658 [TCA_FLOWER_KEY_MPLS_TTL
] = { .type
= NLA_U8
},
659 [TCA_FLOWER_KEY_MPLS_BOS
] = { .type
= NLA_U8
},
660 [TCA_FLOWER_KEY_MPLS_TC
] = { .type
= NLA_U8
},
661 [TCA_FLOWER_KEY_MPLS_LABEL
] = { .type
= NLA_U32
},
662 [TCA_FLOWER_KEY_TCP_FLAGS
] = { .type
= NLA_U16
},
663 [TCA_FLOWER_KEY_TCP_FLAGS_MASK
] = { .type
= NLA_U16
},
664 [TCA_FLOWER_KEY_IP_TOS
] = { .type
= NLA_U8
},
665 [TCA_FLOWER_KEY_IP_TOS_MASK
] = { .type
= NLA_U8
},
666 [TCA_FLOWER_KEY_IP_TTL
] = { .type
= NLA_U8
},
667 [TCA_FLOWER_KEY_IP_TTL_MASK
] = { .type
= NLA_U8
},
668 [TCA_FLOWER_KEY_CVLAN_ID
] = { .type
= NLA_U16
},
669 [TCA_FLOWER_KEY_CVLAN_PRIO
] = { .type
= NLA_U8
},
670 [TCA_FLOWER_KEY_CVLAN_ETH_TYPE
] = { .type
= NLA_U16
},
671 [TCA_FLOWER_KEY_ENC_IP_TOS
] = { .type
= NLA_U8
},
672 [TCA_FLOWER_KEY_ENC_IP_TOS_MASK
] = { .type
= NLA_U8
},
673 [TCA_FLOWER_KEY_ENC_IP_TTL
] = { .type
= NLA_U8
},
674 [TCA_FLOWER_KEY_ENC_IP_TTL_MASK
] = { .type
= NLA_U8
},
675 [TCA_FLOWER_KEY_ENC_OPTS
] = { .type
= NLA_NESTED
},
676 [TCA_FLOWER_KEY_ENC_OPTS_MASK
] = { .type
= NLA_NESTED
},
677 [TCA_FLOWER_KEY_CT_STATE
] = { .type
= NLA_U16
},
678 [TCA_FLOWER_KEY_CT_STATE_MASK
] = { .type
= NLA_U16
},
679 [TCA_FLOWER_KEY_CT_ZONE
] = { .type
= NLA_U16
},
680 [TCA_FLOWER_KEY_CT_ZONE_MASK
] = { .type
= NLA_U16
},
681 [TCA_FLOWER_KEY_CT_MARK
] = { .type
= NLA_U32
},
682 [TCA_FLOWER_KEY_CT_MARK_MASK
] = { .type
= NLA_U32
},
683 [TCA_FLOWER_KEY_CT_LABELS
] = { .type
= NLA_BINARY
,
684 .len
= 128 / BITS_PER_BYTE
},
685 [TCA_FLOWER_KEY_CT_LABELS_MASK
] = { .type
= NLA_BINARY
,
686 .len
= 128 / BITS_PER_BYTE
},
689 static const struct nla_policy
690 enc_opts_policy
[TCA_FLOWER_KEY_ENC_OPTS_MAX
+ 1] = {
691 [TCA_FLOWER_KEY_ENC_OPTS_GENEVE
] = { .type
= NLA_NESTED
},
694 static const struct nla_policy
695 geneve_opt_policy
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX
+ 1] = {
696 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS
] = { .type
= NLA_U16
},
697 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE
] = { .type
= NLA_U8
},
698 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA
] = { .type
= NLA_BINARY
,
702 static void fl_set_key_val(struct nlattr
**tb
,
703 void *val
, int val_type
,
704 void *mask
, int mask_type
, int len
)
708 nla_memcpy(val
, tb
[val_type
], len
);
709 if (mask_type
== TCA_FLOWER_UNSPEC
|| !tb
[mask_type
])
710 memset(mask
, 0xff, len
);
712 nla_memcpy(mask
, tb
[mask_type
], len
);
715 static int fl_set_key_port_range(struct nlattr
**tb
, struct fl_flow_key
*key
,
716 struct fl_flow_key
*mask
)
718 fl_set_key_val(tb
, &key
->tp_min
.dst
,
719 TCA_FLOWER_KEY_PORT_DST_MIN
, &mask
->tp_min
.dst
,
720 TCA_FLOWER_UNSPEC
, sizeof(key
->tp_min
.dst
));
721 fl_set_key_val(tb
, &key
->tp_max
.dst
,
722 TCA_FLOWER_KEY_PORT_DST_MAX
, &mask
->tp_max
.dst
,
723 TCA_FLOWER_UNSPEC
, sizeof(key
->tp_max
.dst
));
724 fl_set_key_val(tb
, &key
->tp_min
.src
,
725 TCA_FLOWER_KEY_PORT_SRC_MIN
, &mask
->tp_min
.src
,
726 TCA_FLOWER_UNSPEC
, sizeof(key
->tp_min
.src
));
727 fl_set_key_val(tb
, &key
->tp_max
.src
,
728 TCA_FLOWER_KEY_PORT_SRC_MAX
, &mask
->tp_max
.src
,
729 TCA_FLOWER_UNSPEC
, sizeof(key
->tp_max
.src
));
731 if ((mask
->tp_min
.dst
&& mask
->tp_max
.dst
&&
732 htons(key
->tp_max
.dst
) <= htons(key
->tp_min
.dst
)) ||
733 (mask
->tp_min
.src
&& mask
->tp_max
.src
&&
734 htons(key
->tp_max
.src
) <= htons(key
->tp_min
.src
)))
740 static int fl_set_key_mpls(struct nlattr
**tb
,
741 struct flow_dissector_key_mpls
*key_val
,
742 struct flow_dissector_key_mpls
*key_mask
)
744 if (tb
[TCA_FLOWER_KEY_MPLS_TTL
]) {
745 key_val
->mpls_ttl
= nla_get_u8(tb
[TCA_FLOWER_KEY_MPLS_TTL
]);
746 key_mask
->mpls_ttl
= MPLS_TTL_MASK
;
748 if (tb
[TCA_FLOWER_KEY_MPLS_BOS
]) {
749 u8 bos
= nla_get_u8(tb
[TCA_FLOWER_KEY_MPLS_BOS
]);
751 if (bos
& ~MPLS_BOS_MASK
)
753 key_val
->mpls_bos
= bos
;
754 key_mask
->mpls_bos
= MPLS_BOS_MASK
;
756 if (tb
[TCA_FLOWER_KEY_MPLS_TC
]) {
757 u8 tc
= nla_get_u8(tb
[TCA_FLOWER_KEY_MPLS_TC
]);
759 if (tc
& ~MPLS_TC_MASK
)
761 key_val
->mpls_tc
= tc
;
762 key_mask
->mpls_tc
= MPLS_TC_MASK
;
764 if (tb
[TCA_FLOWER_KEY_MPLS_LABEL
]) {
765 u32 label
= nla_get_u32(tb
[TCA_FLOWER_KEY_MPLS_LABEL
]);
767 if (label
& ~MPLS_LABEL_MASK
)
769 key_val
->mpls_label
= label
;
770 key_mask
->mpls_label
= MPLS_LABEL_MASK
;
775 static void fl_set_key_vlan(struct nlattr
**tb
,
777 int vlan_id_key
, int vlan_prio_key
,
778 struct flow_dissector_key_vlan
*key_val
,
779 struct flow_dissector_key_vlan
*key_mask
)
781 #define VLAN_PRIORITY_MASK 0x7
783 if (tb
[vlan_id_key
]) {
785 nla_get_u16(tb
[vlan_id_key
]) & VLAN_VID_MASK
;
786 key_mask
->vlan_id
= VLAN_VID_MASK
;
788 if (tb
[vlan_prio_key
]) {
789 key_val
->vlan_priority
=
790 nla_get_u8(tb
[vlan_prio_key
]) &
792 key_mask
->vlan_priority
= VLAN_PRIORITY_MASK
;
794 key_val
->vlan_tpid
= ethertype
;
795 key_mask
->vlan_tpid
= cpu_to_be16(~0);
798 static void fl_set_key_flag(u32 flower_key
, u32 flower_mask
,
799 u32
*dissector_key
, u32
*dissector_mask
,
800 u32 flower_flag_bit
, u32 dissector_flag_bit
)
802 if (flower_mask
& flower_flag_bit
) {
803 *dissector_mask
|= dissector_flag_bit
;
804 if (flower_key
& flower_flag_bit
)
805 *dissector_key
|= dissector_flag_bit
;
809 static int fl_set_key_flags(struct nlattr
**tb
,
810 u32
*flags_key
, u32
*flags_mask
)
814 /* mask is mandatory for flags */
815 if (!tb
[TCA_FLOWER_KEY_FLAGS_MASK
])
818 key
= be32_to_cpu(nla_get_u32(tb
[TCA_FLOWER_KEY_FLAGS
]));
819 mask
= be32_to_cpu(nla_get_u32(tb
[TCA_FLOWER_KEY_FLAGS_MASK
]));
824 fl_set_key_flag(key
, mask
, flags_key
, flags_mask
,
825 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT
, FLOW_DIS_IS_FRAGMENT
);
826 fl_set_key_flag(key
, mask
, flags_key
, flags_mask
,
827 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST
,
828 FLOW_DIS_FIRST_FRAG
);
833 static void fl_set_key_ip(struct nlattr
**tb
, bool encap
,
834 struct flow_dissector_key_ip
*key
,
835 struct flow_dissector_key_ip
*mask
)
837 int tos_key
= encap
? TCA_FLOWER_KEY_ENC_IP_TOS
: TCA_FLOWER_KEY_IP_TOS
;
838 int ttl_key
= encap
? TCA_FLOWER_KEY_ENC_IP_TTL
: TCA_FLOWER_KEY_IP_TTL
;
839 int tos_mask
= encap
? TCA_FLOWER_KEY_ENC_IP_TOS_MASK
: TCA_FLOWER_KEY_IP_TOS_MASK
;
840 int ttl_mask
= encap
? TCA_FLOWER_KEY_ENC_IP_TTL_MASK
: TCA_FLOWER_KEY_IP_TTL_MASK
;
842 fl_set_key_val(tb
, &key
->tos
, tos_key
, &mask
->tos
, tos_mask
, sizeof(key
->tos
));
843 fl_set_key_val(tb
, &key
->ttl
, ttl_key
, &mask
->ttl
, ttl_mask
, sizeof(key
->ttl
));
846 static int fl_set_geneve_opt(const struct nlattr
*nla
, struct fl_flow_key
*key
,
847 int depth
, int option_len
,
848 struct netlink_ext_ack
*extack
)
850 struct nlattr
*tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX
+ 1];
851 struct nlattr
*class = NULL
, *type
= NULL
, *data
= NULL
;
852 struct geneve_opt
*opt
;
853 int err
, data_len
= 0;
855 if (option_len
> sizeof(struct geneve_opt
))
856 data_len
= option_len
- sizeof(struct geneve_opt
);
858 opt
= (struct geneve_opt
*)&key
->enc_opts
.data
[key
->enc_opts
.len
];
859 memset(opt
, 0xff, option_len
);
860 opt
->length
= data_len
/ 4;
865 /* If no mask has been prodived we assume an exact match. */
867 return sizeof(struct geneve_opt
) + data_len
;
869 if (nla_type(nla
) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE
) {
870 NL_SET_ERR_MSG(extack
, "Non-geneve option type for mask");
874 err
= nla_parse_nested_deprecated(tb
,
875 TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX
,
876 nla
, geneve_opt_policy
, extack
);
880 /* We are not allowed to omit any of CLASS, TYPE or DATA
881 * fields from the key.
884 (!tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS
] ||
885 !tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE
] ||
886 !tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA
])) {
887 NL_SET_ERR_MSG(extack
, "Missing tunnel key geneve option class, type or data");
891 /* Omitting any of CLASS, TYPE or DATA fields is allowed
894 if (tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA
]) {
895 int new_len
= key
->enc_opts
.len
;
897 data
= tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA
];
898 data_len
= nla_len(data
);
900 NL_SET_ERR_MSG(extack
, "Tunnel key geneve option data is less than 4 bytes long");
904 NL_SET_ERR_MSG(extack
, "Tunnel key geneve option data is not a multiple of 4 bytes long");
908 new_len
+= sizeof(struct geneve_opt
) + data_len
;
909 BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX
!= IP_TUNNEL_OPTS_MAX
);
910 if (new_len
> FLOW_DIS_TUN_OPTS_MAX
) {
911 NL_SET_ERR_MSG(extack
, "Tunnel options exceeds max size");
914 opt
->length
= data_len
/ 4;
915 memcpy(opt
->opt_data
, nla_data(data
), data_len
);
918 if (tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS
]) {
919 class = tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS
];
920 opt
->opt_class
= nla_get_be16(class);
923 if (tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE
]) {
924 type
= tb
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE
];
925 opt
->type
= nla_get_u8(type
);
928 return sizeof(struct geneve_opt
) + data_len
;
931 static int fl_set_enc_opt(struct nlattr
**tb
, struct fl_flow_key
*key
,
932 struct fl_flow_key
*mask
,
933 struct netlink_ext_ack
*extack
)
935 const struct nlattr
*nla_enc_key
, *nla_opt_key
, *nla_opt_msk
= NULL
;
936 int err
, option_len
, key_depth
, msk_depth
= 0;
938 err
= nla_validate_nested_deprecated(tb
[TCA_FLOWER_KEY_ENC_OPTS
],
939 TCA_FLOWER_KEY_ENC_OPTS_MAX
,
940 enc_opts_policy
, extack
);
944 nla_enc_key
= nla_data(tb
[TCA_FLOWER_KEY_ENC_OPTS
]);
946 if (tb
[TCA_FLOWER_KEY_ENC_OPTS_MASK
]) {
947 err
= nla_validate_nested_deprecated(tb
[TCA_FLOWER_KEY_ENC_OPTS_MASK
],
948 TCA_FLOWER_KEY_ENC_OPTS_MAX
,
949 enc_opts_policy
, extack
);
953 nla_opt_msk
= nla_data(tb
[TCA_FLOWER_KEY_ENC_OPTS_MASK
]);
954 msk_depth
= nla_len(tb
[TCA_FLOWER_KEY_ENC_OPTS_MASK
]);
957 nla_for_each_attr(nla_opt_key
, nla_enc_key
,
958 nla_len(tb
[TCA_FLOWER_KEY_ENC_OPTS
]), key_depth
) {
959 switch (nla_type(nla_opt_key
)) {
960 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE
:
962 key
->enc_opts
.dst_opt_type
= TUNNEL_GENEVE_OPT
;
963 option_len
= fl_set_geneve_opt(nla_opt_key
, key
,
964 key_depth
, option_len
,
969 key
->enc_opts
.len
+= option_len
;
970 /* At the same time we need to parse through the mask
971 * in order to verify exact and mask attribute lengths.
973 mask
->enc_opts
.dst_opt_type
= TUNNEL_GENEVE_OPT
;
974 option_len
= fl_set_geneve_opt(nla_opt_msk
, mask
,
975 msk_depth
, option_len
,
980 mask
->enc_opts
.len
+= option_len
;
981 if (key
->enc_opts
.len
!= mask
->enc_opts
.len
) {
982 NL_SET_ERR_MSG(extack
, "Key and mask miss aligned");
987 nla_opt_msk
= nla_next(nla_opt_msk
, &msk_depth
);
990 NL_SET_ERR_MSG(extack
, "Unknown tunnel option type");
998 static int fl_set_key_ct(struct nlattr
**tb
,
999 struct flow_dissector_key_ct
*key
,
1000 struct flow_dissector_key_ct
*mask
,
1001 struct netlink_ext_ack
*extack
)
1003 if (tb
[TCA_FLOWER_KEY_CT_STATE
]) {
1004 if (!IS_ENABLED(CONFIG_NF_CONNTRACK
)) {
1005 NL_SET_ERR_MSG(extack
, "Conntrack isn't enabled");
1008 fl_set_key_val(tb
, &key
->ct_state
, TCA_FLOWER_KEY_CT_STATE
,
1009 &mask
->ct_state
, TCA_FLOWER_KEY_CT_STATE_MASK
,
1010 sizeof(key
->ct_state
));
1012 if (tb
[TCA_FLOWER_KEY_CT_ZONE
]) {
1013 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES
)) {
1014 NL_SET_ERR_MSG(extack
, "Conntrack zones isn't enabled");
1017 fl_set_key_val(tb
, &key
->ct_zone
, TCA_FLOWER_KEY_CT_ZONE
,
1018 &mask
->ct_zone
, TCA_FLOWER_KEY_CT_ZONE_MASK
,
1019 sizeof(key
->ct_zone
));
1021 if (tb
[TCA_FLOWER_KEY_CT_MARK
]) {
1022 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK
)) {
1023 NL_SET_ERR_MSG(extack
, "Conntrack mark isn't enabled");
1026 fl_set_key_val(tb
, &key
->ct_mark
, TCA_FLOWER_KEY_CT_MARK
,
1027 &mask
->ct_mark
, TCA_FLOWER_KEY_CT_MARK_MASK
,
1028 sizeof(key
->ct_mark
));
1030 if (tb
[TCA_FLOWER_KEY_CT_LABELS
]) {
1031 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS
)) {
1032 NL_SET_ERR_MSG(extack
, "Conntrack labels aren't enabled");
1035 fl_set_key_val(tb
, key
->ct_labels
, TCA_FLOWER_KEY_CT_LABELS
,
1036 mask
->ct_labels
, TCA_FLOWER_KEY_CT_LABELS_MASK
,
1037 sizeof(key
->ct_labels
));
1043 static int fl_set_key(struct net
*net
, struct nlattr
**tb
,
1044 struct fl_flow_key
*key
, struct fl_flow_key
*mask
,
1045 struct netlink_ext_ack
*extack
)
1050 if (tb
[TCA_FLOWER_INDEV
]) {
1051 int err
= tcf_change_indev(net
, tb
[TCA_FLOWER_INDEV
], extack
);
1054 key
->meta
.ingress_ifindex
= err
;
1055 mask
->meta
.ingress_ifindex
= 0xffffffff;
1058 fl_set_key_val(tb
, key
->eth
.dst
, TCA_FLOWER_KEY_ETH_DST
,
1059 mask
->eth
.dst
, TCA_FLOWER_KEY_ETH_DST_MASK
,
1060 sizeof(key
->eth
.dst
));
1061 fl_set_key_val(tb
, key
->eth
.src
, TCA_FLOWER_KEY_ETH_SRC
,
1062 mask
->eth
.src
, TCA_FLOWER_KEY_ETH_SRC_MASK
,
1063 sizeof(key
->eth
.src
));
1065 if (tb
[TCA_FLOWER_KEY_ETH_TYPE
]) {
1066 ethertype
= nla_get_be16(tb
[TCA_FLOWER_KEY_ETH_TYPE
]);
1068 if (eth_type_vlan(ethertype
)) {
1069 fl_set_key_vlan(tb
, ethertype
, TCA_FLOWER_KEY_VLAN_ID
,
1070 TCA_FLOWER_KEY_VLAN_PRIO
, &key
->vlan
,
1073 if (tb
[TCA_FLOWER_KEY_VLAN_ETH_TYPE
]) {
1074 ethertype
= nla_get_be16(tb
[TCA_FLOWER_KEY_VLAN_ETH_TYPE
]);
1075 if (eth_type_vlan(ethertype
)) {
1076 fl_set_key_vlan(tb
, ethertype
,
1077 TCA_FLOWER_KEY_CVLAN_ID
,
1078 TCA_FLOWER_KEY_CVLAN_PRIO
,
1079 &key
->cvlan
, &mask
->cvlan
);
1080 fl_set_key_val(tb
, &key
->basic
.n_proto
,
1081 TCA_FLOWER_KEY_CVLAN_ETH_TYPE
,
1082 &mask
->basic
.n_proto
,
1084 sizeof(key
->basic
.n_proto
));
1086 key
->basic
.n_proto
= ethertype
;
1087 mask
->basic
.n_proto
= cpu_to_be16(~0);
1091 key
->basic
.n_proto
= ethertype
;
1092 mask
->basic
.n_proto
= cpu_to_be16(~0);
1096 if (key
->basic
.n_proto
== htons(ETH_P_IP
) ||
1097 key
->basic
.n_proto
== htons(ETH_P_IPV6
)) {
1098 fl_set_key_val(tb
, &key
->basic
.ip_proto
, TCA_FLOWER_KEY_IP_PROTO
,
1099 &mask
->basic
.ip_proto
, TCA_FLOWER_UNSPEC
,
1100 sizeof(key
->basic
.ip_proto
));
1101 fl_set_key_ip(tb
, false, &key
->ip
, &mask
->ip
);
1104 if (tb
[TCA_FLOWER_KEY_IPV4_SRC
] || tb
[TCA_FLOWER_KEY_IPV4_DST
]) {
1105 key
->control
.addr_type
= FLOW_DISSECTOR_KEY_IPV4_ADDRS
;
1106 mask
->control
.addr_type
= ~0;
1107 fl_set_key_val(tb
, &key
->ipv4
.src
, TCA_FLOWER_KEY_IPV4_SRC
,
1108 &mask
->ipv4
.src
, TCA_FLOWER_KEY_IPV4_SRC_MASK
,
1109 sizeof(key
->ipv4
.src
));
1110 fl_set_key_val(tb
, &key
->ipv4
.dst
, TCA_FLOWER_KEY_IPV4_DST
,
1111 &mask
->ipv4
.dst
, TCA_FLOWER_KEY_IPV4_DST_MASK
,
1112 sizeof(key
->ipv4
.dst
));
1113 } else if (tb
[TCA_FLOWER_KEY_IPV6_SRC
] || tb
[TCA_FLOWER_KEY_IPV6_DST
]) {
1114 key
->control
.addr_type
= FLOW_DISSECTOR_KEY_IPV6_ADDRS
;
1115 mask
->control
.addr_type
= ~0;
1116 fl_set_key_val(tb
, &key
->ipv6
.src
, TCA_FLOWER_KEY_IPV6_SRC
,
1117 &mask
->ipv6
.src
, TCA_FLOWER_KEY_IPV6_SRC_MASK
,
1118 sizeof(key
->ipv6
.src
));
1119 fl_set_key_val(tb
, &key
->ipv6
.dst
, TCA_FLOWER_KEY_IPV6_DST
,
1120 &mask
->ipv6
.dst
, TCA_FLOWER_KEY_IPV6_DST_MASK
,
1121 sizeof(key
->ipv6
.dst
));
1124 if (key
->basic
.ip_proto
== IPPROTO_TCP
) {
1125 fl_set_key_val(tb
, &key
->tp
.src
, TCA_FLOWER_KEY_TCP_SRC
,
1126 &mask
->tp
.src
, TCA_FLOWER_KEY_TCP_SRC_MASK
,
1127 sizeof(key
->tp
.src
));
1128 fl_set_key_val(tb
, &key
->tp
.dst
, TCA_FLOWER_KEY_TCP_DST
,
1129 &mask
->tp
.dst
, TCA_FLOWER_KEY_TCP_DST_MASK
,
1130 sizeof(key
->tp
.dst
));
1131 fl_set_key_val(tb
, &key
->tcp
.flags
, TCA_FLOWER_KEY_TCP_FLAGS
,
1132 &mask
->tcp
.flags
, TCA_FLOWER_KEY_TCP_FLAGS_MASK
,
1133 sizeof(key
->tcp
.flags
));
1134 } else if (key
->basic
.ip_proto
== IPPROTO_UDP
) {
1135 fl_set_key_val(tb
, &key
->tp
.src
, TCA_FLOWER_KEY_UDP_SRC
,
1136 &mask
->tp
.src
, TCA_FLOWER_KEY_UDP_SRC_MASK
,
1137 sizeof(key
->tp
.src
));
1138 fl_set_key_val(tb
, &key
->tp
.dst
, TCA_FLOWER_KEY_UDP_DST
,
1139 &mask
->tp
.dst
, TCA_FLOWER_KEY_UDP_DST_MASK
,
1140 sizeof(key
->tp
.dst
));
1141 } else if (key
->basic
.ip_proto
== IPPROTO_SCTP
) {
1142 fl_set_key_val(tb
, &key
->tp
.src
, TCA_FLOWER_KEY_SCTP_SRC
,
1143 &mask
->tp
.src
, TCA_FLOWER_KEY_SCTP_SRC_MASK
,
1144 sizeof(key
->tp
.src
));
1145 fl_set_key_val(tb
, &key
->tp
.dst
, TCA_FLOWER_KEY_SCTP_DST
,
1146 &mask
->tp
.dst
, TCA_FLOWER_KEY_SCTP_DST_MASK
,
1147 sizeof(key
->tp
.dst
));
1148 } else if (key
->basic
.n_proto
== htons(ETH_P_IP
) &&
1149 key
->basic
.ip_proto
== IPPROTO_ICMP
) {
1150 fl_set_key_val(tb
, &key
->icmp
.type
, TCA_FLOWER_KEY_ICMPV4_TYPE
,
1152 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK
,
1153 sizeof(key
->icmp
.type
));
1154 fl_set_key_val(tb
, &key
->icmp
.code
, TCA_FLOWER_KEY_ICMPV4_CODE
,
1156 TCA_FLOWER_KEY_ICMPV4_CODE_MASK
,
1157 sizeof(key
->icmp
.code
));
1158 } else if (key
->basic
.n_proto
== htons(ETH_P_IPV6
) &&
1159 key
->basic
.ip_proto
== IPPROTO_ICMPV6
) {
1160 fl_set_key_val(tb
, &key
->icmp
.type
, TCA_FLOWER_KEY_ICMPV6_TYPE
,
1162 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK
,
1163 sizeof(key
->icmp
.type
));
1164 fl_set_key_val(tb
, &key
->icmp
.code
, TCA_FLOWER_KEY_ICMPV6_CODE
,
1166 TCA_FLOWER_KEY_ICMPV6_CODE_MASK
,
1167 sizeof(key
->icmp
.code
));
1168 } else if (key
->basic
.n_proto
== htons(ETH_P_MPLS_UC
) ||
1169 key
->basic
.n_proto
== htons(ETH_P_MPLS_MC
)) {
1170 ret
= fl_set_key_mpls(tb
, &key
->mpls
, &mask
->mpls
);
1173 } else if (key
->basic
.n_proto
== htons(ETH_P_ARP
) ||
1174 key
->basic
.n_proto
== htons(ETH_P_RARP
)) {
1175 fl_set_key_val(tb
, &key
->arp
.sip
, TCA_FLOWER_KEY_ARP_SIP
,
1176 &mask
->arp
.sip
, TCA_FLOWER_KEY_ARP_SIP_MASK
,
1177 sizeof(key
->arp
.sip
));
1178 fl_set_key_val(tb
, &key
->arp
.tip
, TCA_FLOWER_KEY_ARP_TIP
,
1179 &mask
->arp
.tip
, TCA_FLOWER_KEY_ARP_TIP_MASK
,
1180 sizeof(key
->arp
.tip
));
1181 fl_set_key_val(tb
, &key
->arp
.op
, TCA_FLOWER_KEY_ARP_OP
,
1182 &mask
->arp
.op
, TCA_FLOWER_KEY_ARP_OP_MASK
,
1183 sizeof(key
->arp
.op
));
1184 fl_set_key_val(tb
, key
->arp
.sha
, TCA_FLOWER_KEY_ARP_SHA
,
1185 mask
->arp
.sha
, TCA_FLOWER_KEY_ARP_SHA_MASK
,
1186 sizeof(key
->arp
.sha
));
1187 fl_set_key_val(tb
, key
->arp
.tha
, TCA_FLOWER_KEY_ARP_THA
,
1188 mask
->arp
.tha
, TCA_FLOWER_KEY_ARP_THA_MASK
,
1189 sizeof(key
->arp
.tha
));
1192 if (key
->basic
.ip_proto
== IPPROTO_TCP
||
1193 key
->basic
.ip_proto
== IPPROTO_UDP
||
1194 key
->basic
.ip_proto
== IPPROTO_SCTP
) {
1195 ret
= fl_set_key_port_range(tb
, key
, mask
);
1200 if (tb
[TCA_FLOWER_KEY_ENC_IPV4_SRC
] ||
1201 tb
[TCA_FLOWER_KEY_ENC_IPV4_DST
]) {
1202 key
->enc_control
.addr_type
= FLOW_DISSECTOR_KEY_IPV4_ADDRS
;
1203 mask
->enc_control
.addr_type
= ~0;
1204 fl_set_key_val(tb
, &key
->enc_ipv4
.src
,
1205 TCA_FLOWER_KEY_ENC_IPV4_SRC
,
1206 &mask
->enc_ipv4
.src
,
1207 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK
,
1208 sizeof(key
->enc_ipv4
.src
));
1209 fl_set_key_val(tb
, &key
->enc_ipv4
.dst
,
1210 TCA_FLOWER_KEY_ENC_IPV4_DST
,
1211 &mask
->enc_ipv4
.dst
,
1212 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK
,
1213 sizeof(key
->enc_ipv4
.dst
));
1216 if (tb
[TCA_FLOWER_KEY_ENC_IPV6_SRC
] ||
1217 tb
[TCA_FLOWER_KEY_ENC_IPV6_DST
]) {
1218 key
->enc_control
.addr_type
= FLOW_DISSECTOR_KEY_IPV6_ADDRS
;
1219 mask
->enc_control
.addr_type
= ~0;
1220 fl_set_key_val(tb
, &key
->enc_ipv6
.src
,
1221 TCA_FLOWER_KEY_ENC_IPV6_SRC
,
1222 &mask
->enc_ipv6
.src
,
1223 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK
,
1224 sizeof(key
->enc_ipv6
.src
));
1225 fl_set_key_val(tb
, &key
->enc_ipv6
.dst
,
1226 TCA_FLOWER_KEY_ENC_IPV6_DST
,
1227 &mask
->enc_ipv6
.dst
,
1228 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK
,
1229 sizeof(key
->enc_ipv6
.dst
));
1232 fl_set_key_val(tb
, &key
->enc_key_id
.keyid
, TCA_FLOWER_KEY_ENC_KEY_ID
,
1233 &mask
->enc_key_id
.keyid
, TCA_FLOWER_UNSPEC
,
1234 sizeof(key
->enc_key_id
.keyid
));
1236 fl_set_key_val(tb
, &key
->enc_tp
.src
, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT
,
1237 &mask
->enc_tp
.src
, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK
,
1238 sizeof(key
->enc_tp
.src
));
1240 fl_set_key_val(tb
, &key
->enc_tp
.dst
, TCA_FLOWER_KEY_ENC_UDP_DST_PORT
,
1241 &mask
->enc_tp
.dst
, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK
,
1242 sizeof(key
->enc_tp
.dst
));
1244 fl_set_key_ip(tb
, true, &key
->enc_ip
, &mask
->enc_ip
);
1246 if (tb
[TCA_FLOWER_KEY_ENC_OPTS
]) {
1247 ret
= fl_set_enc_opt(tb
, key
, mask
, extack
);
1252 ret
= fl_set_key_ct(tb
, &key
->ct
, &mask
->ct
, extack
);
1256 if (tb
[TCA_FLOWER_KEY_FLAGS
])
1257 ret
= fl_set_key_flags(tb
, &key
->control
.flags
, &mask
->control
.flags
);
1262 static void fl_mask_copy(struct fl_flow_mask
*dst
,
1263 struct fl_flow_mask
*src
)
1265 const void *psrc
= fl_key_get_start(&src
->key
, src
);
1266 void *pdst
= fl_key_get_start(&dst
->key
, src
);
1268 memcpy(pdst
, psrc
, fl_mask_range(src
));
1269 dst
->range
= src
->range
;
1272 static const struct rhashtable_params fl_ht_params
= {
1273 .key_offset
= offsetof(struct cls_fl_filter
, mkey
), /* base offset */
1274 .head_offset
= offsetof(struct cls_fl_filter
, ht_node
),
1275 .automatic_shrinking
= true,
1278 static int fl_init_mask_hashtable(struct fl_flow_mask
*mask
)
1280 mask
->filter_ht_params
= fl_ht_params
;
1281 mask
->filter_ht_params
.key_len
= fl_mask_range(mask
);
1282 mask
->filter_ht_params
.key_offset
+= mask
->range
.start
;
1284 return rhashtable_init(&mask
->ht
, &mask
->filter_ht_params
);
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)

/* Non-NULL iff any byte of @member in @mask is set (i.e. the member is
 * actually being matched on).
 */
#define FL_KEY_IS_MASKED(mask, member)					\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),	\
		   0, FL_KEY_MEMBER_SIZE(member))			\

/* Append dissector key @id for @member to @keys, advancing @cnt. */
#define FL_KEY_SET(keys, cnt, id, member)				\
	do {								\
		keys[cnt].key_id = id;					\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);	\
		cnt++;							\
	} while(0);

/* As FL_KEY_SET, but only when @member is part of the mask. */
#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)		\
	do {								\
		if (FL_KEY_IS_MASKED(mask, member))			\
			FL_KEY_SET(keys, cnt, id, member);		\
	} while(0);
1307 static void fl_init_dissector(struct flow_dissector
*dissector
,
1308 struct fl_flow_key
*mask
)
1310 struct flow_dissector_key keys
[FLOW_DISSECTOR_KEY_MAX
];
1313 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1314 FLOW_DISSECTOR_KEY_META
, meta
);
1315 FL_KEY_SET(keys
, cnt
, FLOW_DISSECTOR_KEY_CONTROL
, control
);
1316 FL_KEY_SET(keys
, cnt
, FLOW_DISSECTOR_KEY_BASIC
, basic
);
1317 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1318 FLOW_DISSECTOR_KEY_ETH_ADDRS
, eth
);
1319 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1320 FLOW_DISSECTOR_KEY_IPV4_ADDRS
, ipv4
);
1321 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1322 FLOW_DISSECTOR_KEY_IPV6_ADDRS
, ipv6
);
1323 if (FL_KEY_IS_MASKED(mask
, tp
) ||
1324 FL_KEY_IS_MASKED(mask
, tp_min
) || FL_KEY_IS_MASKED(mask
, tp_max
))
1325 FL_KEY_SET(keys
, cnt
, FLOW_DISSECTOR_KEY_PORTS
, tp
);
1326 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1327 FLOW_DISSECTOR_KEY_IP
, ip
);
1328 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1329 FLOW_DISSECTOR_KEY_TCP
, tcp
);
1330 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1331 FLOW_DISSECTOR_KEY_ICMP
, icmp
);
1332 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1333 FLOW_DISSECTOR_KEY_ARP
, arp
);
1334 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1335 FLOW_DISSECTOR_KEY_MPLS
, mpls
);
1336 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1337 FLOW_DISSECTOR_KEY_VLAN
, vlan
);
1338 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1339 FLOW_DISSECTOR_KEY_CVLAN
, cvlan
);
1340 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1341 FLOW_DISSECTOR_KEY_ENC_KEYID
, enc_key_id
);
1342 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1343 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS
, enc_ipv4
);
1344 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1345 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS
, enc_ipv6
);
1346 if (FL_KEY_IS_MASKED(mask
, enc_ipv4
) ||
1347 FL_KEY_IS_MASKED(mask
, enc_ipv6
))
1348 FL_KEY_SET(keys
, cnt
, FLOW_DISSECTOR_KEY_ENC_CONTROL
,
1350 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1351 FLOW_DISSECTOR_KEY_ENC_PORTS
, enc_tp
);
1352 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1353 FLOW_DISSECTOR_KEY_ENC_IP
, enc_ip
);
1354 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1355 FLOW_DISSECTOR_KEY_ENC_OPTS
, enc_opts
);
1356 FL_KEY_SET_IF_MASKED(mask
, keys
, cnt
,
1357 FLOW_DISSECTOR_KEY_CT
, ct
);
1359 skb_flow_dissector_init(dissector
, keys
, cnt
);
1362 static struct fl_flow_mask
*fl_create_new_mask(struct cls_fl_head
*head
,
1363 struct fl_flow_mask
*mask
)
1365 struct fl_flow_mask
*newmask
;
1368 newmask
= kzalloc(sizeof(*newmask
), GFP_KERNEL
);
1370 return ERR_PTR(-ENOMEM
);
1372 fl_mask_copy(newmask
, mask
);
1374 if ((newmask
->key
.tp_min
.dst
&& newmask
->key
.tp_max
.dst
) ||
1375 (newmask
->key
.tp_min
.src
&& newmask
->key
.tp_max
.src
))
1376 newmask
->flags
|= TCA_FLOWER_MASK_FLAGS_RANGE
;
1378 err
= fl_init_mask_hashtable(newmask
);
1382 fl_init_dissector(&newmask
->dissector
, &newmask
->key
);
1384 INIT_LIST_HEAD_RCU(&newmask
->filters
);
1386 refcount_set(&newmask
->refcnt
, 1);
1387 err
= rhashtable_replace_fast(&head
->ht
, &mask
->ht_node
,
1388 &newmask
->ht_node
, mask_ht_params
);
1390 goto errout_destroy
;
1392 spin_lock(&head
->masks_lock
);
1393 list_add_tail_rcu(&newmask
->list
, &head
->masks
);
1394 spin_unlock(&head
->masks_lock
);
1399 rhashtable_destroy(&newmask
->ht
);
1403 return ERR_PTR(err
);
1406 static int fl_check_assign_mask(struct cls_fl_head
*head
,
1407 struct cls_fl_filter
*fnew
,
1408 struct cls_fl_filter
*fold
,
1409 struct fl_flow_mask
*mask
)
1411 struct fl_flow_mask
*newmask
;
1416 /* Insert mask as temporary node to prevent concurrent creation of mask
1417 * with same key. Any concurrent lookups with same key will return
1418 * -EAGAIN because mask's refcnt is zero.
1420 fnew
->mask
= rhashtable_lookup_get_insert_fast(&head
->ht
,
1428 goto errout_cleanup
;
1431 newmask
= fl_create_new_mask(head
, mask
);
1432 if (IS_ERR(newmask
)) {
1433 ret
= PTR_ERR(newmask
);
1434 goto errout_cleanup
;
1437 fnew
->mask
= newmask
;
1439 } else if (IS_ERR(fnew
->mask
)) {
1440 ret
= PTR_ERR(fnew
->mask
);
1441 } else if (fold
&& fold
->mask
!= fnew
->mask
) {
1443 } else if (!refcount_inc_not_zero(&fnew
->mask
->refcnt
)) {
1444 /* Mask was deleted concurrently, try again */
1451 rhashtable_remove_fast(&head
->ht
, &mask
->ht_node
,
1456 static int fl_set_parms(struct net
*net
, struct tcf_proto
*tp
,
1457 struct cls_fl_filter
*f
, struct fl_flow_mask
*mask
,
1458 unsigned long base
, struct nlattr
**tb
,
1459 struct nlattr
*est
, bool ovr
,
1460 struct fl_flow_tmplt
*tmplt
, bool rtnl_held
,
1461 struct netlink_ext_ack
*extack
)
1465 err
= tcf_exts_validate(net
, tp
, tb
, est
, &f
->exts
, ovr
, rtnl_held
,
1470 if (tb
[TCA_FLOWER_CLASSID
]) {
1471 f
->res
.classid
= nla_get_u32(tb
[TCA_FLOWER_CLASSID
]);
1474 tcf_bind_filter(tp
, &f
->res
, base
);
1479 err
= fl_set_key(net
, tb
, &f
->key
, &mask
->key
, extack
);
1483 fl_mask_update_range(mask
);
1484 fl_set_masked_key(&f
->mkey
, &f
->key
, mask
);
1486 if (!fl_mask_fits_tmplt(tmplt
, mask
)) {
1487 NL_SET_ERR_MSG_MOD(extack
, "Mask does not fit the template");
1494 static int fl_ht_insert_unique(struct cls_fl_filter
*fnew
,
1495 struct cls_fl_filter
*fold
,
1498 struct fl_flow_mask
*mask
= fnew
->mask
;
1501 err
= rhashtable_lookup_insert_fast(&mask
->ht
,
1503 mask
->filter_ht_params
);
1506 /* It is okay if filter with same key exists when
1509 return fold
&& err
== -EEXIST
? 0 : err
;
1516 static int fl_change(struct net
*net
, struct sk_buff
*in_skb
,
1517 struct tcf_proto
*tp
, unsigned long base
,
1518 u32 handle
, struct nlattr
**tca
,
1519 void **arg
, bool ovr
, bool rtnl_held
,
1520 struct netlink_ext_ack
*extack
)
1522 struct cls_fl_head
*head
= fl_head_dereference(tp
);
1523 struct cls_fl_filter
*fold
= *arg
;
1524 struct cls_fl_filter
*fnew
;
1525 struct fl_flow_mask
*mask
;
1530 if (!tca
[TCA_OPTIONS
]) {
1535 mask
= kzalloc(sizeof(struct fl_flow_mask
), GFP_KERNEL
);
1541 tb
= kcalloc(TCA_FLOWER_MAX
+ 1, sizeof(struct nlattr
*), GFP_KERNEL
);
1544 goto errout_mask_alloc
;
1547 err
= nla_parse_nested_deprecated(tb
, TCA_FLOWER_MAX
,
1548 tca
[TCA_OPTIONS
], fl_policy
, NULL
);
1552 if (fold
&& handle
&& fold
->handle
!= handle
) {
1557 fnew
= kzalloc(sizeof(*fnew
), GFP_KERNEL
);
1562 INIT_LIST_HEAD(&fnew
->hw_list
);
1563 refcount_set(&fnew
->refcnt
, 1);
1565 err
= tcf_exts_init(&fnew
->exts
, net
, TCA_FLOWER_ACT
, 0);
1569 if (tb
[TCA_FLOWER_FLAGS
]) {
1570 fnew
->flags
= nla_get_u32(tb
[TCA_FLOWER_FLAGS
]);
1572 if (!tc_flags_valid(fnew
->flags
)) {
1578 err
= fl_set_parms(net
, tp
, fnew
, mask
, base
, tb
, tca
[TCA_RATE
], ovr
,
1579 tp
->chain
->tmplt_priv
, rtnl_held
, extack
);
1583 err
= fl_check_assign_mask(head
, fnew
, fold
, mask
);
1587 err
= fl_ht_insert_unique(fnew
, fold
, &in_ht
);
1591 if (!tc_skip_hw(fnew
->flags
)) {
1592 err
= fl_hw_replace_filter(tp
, fnew
, rtnl_held
, extack
);
1597 if (!tc_in_hw(fnew
->flags
))
1598 fnew
->flags
|= TCA_CLS_FLAGS_NOT_IN_HW
;
1600 spin_lock(&tp
->lock
);
1602 /* tp was deleted concurrently. -EAGAIN will cause caller to lookup
1603 * proto again or create new one, if necessary.
1611 /* Fold filter was deleted concurrently. Retry lookup. */
1612 if (fold
->deleted
) {
1617 fnew
->handle
= handle
;
1620 struct rhashtable_params params
=
1621 fnew
->mask
->filter_ht_params
;
1623 err
= rhashtable_insert_fast(&fnew
->mask
->ht
,
1631 refcount_inc(&fnew
->refcnt
);
1632 rhashtable_remove_fast(&fold
->mask
->ht
,
1634 fold
->mask
->filter_ht_params
);
1635 idr_replace(&head
->handle_idr
, fnew
, fnew
->handle
);
1636 list_replace_rcu(&fold
->list
, &fnew
->list
);
1637 fold
->deleted
= true;
1639 spin_unlock(&tp
->lock
);
1641 fl_mask_put(head
, fold
->mask
);
1642 if (!tc_skip_hw(fold
->flags
))
1643 fl_hw_destroy_filter(tp
, fold
, rtnl_held
, NULL
);
1644 tcf_unbind_filter(tp
, &fold
->res
);
1645 /* Caller holds reference to fold, so refcnt is always > 0
1648 refcount_dec(&fold
->refcnt
);
1652 /* user specifies a handle and it doesn't exist */
1653 err
= idr_alloc_u32(&head
->handle_idr
, fnew
, &handle
,
1654 handle
, GFP_ATOMIC
);
1656 /* Filter with specified handle was concurrently
1657 * inserted after initial check in cls_api. This is not
1658 * necessarily an error if NLM_F_EXCL is not set in
1659 * message flags. Returning EAGAIN will cause cls_api to
1660 * try to update concurrently inserted rule.
1666 err
= idr_alloc_u32(&head
->handle_idr
, fnew
, &handle
,
1667 INT_MAX
, GFP_ATOMIC
);
1672 refcount_inc(&fnew
->refcnt
);
1673 fnew
->handle
= handle
;
1674 list_add_tail_rcu(&fnew
->list
, &fnew
->mask
->filters
);
1675 spin_unlock(&tp
->lock
);
1681 tcf_queue_work(&mask
->rwork
, fl_uninit_mask_free_work
);
1685 spin_lock(&tp
->lock
);
1687 fnew
->deleted
= true;
1688 spin_unlock(&tp
->lock
);
1689 if (!tc_skip_hw(fnew
->flags
))
1690 fl_hw_destroy_filter(tp
, fnew
, rtnl_held
, NULL
);
1692 rhashtable_remove_fast(&fnew
->mask
->ht
, &fnew
->ht_node
,
1693 fnew
->mask
->filter_ht_params
);
1695 fl_mask_put(head
, fnew
->mask
);
1701 tcf_queue_work(&mask
->rwork
, fl_uninit_mask_free_work
);
1708 static int fl_delete(struct tcf_proto
*tp
, void *arg
, bool *last
,
1709 bool rtnl_held
, struct netlink_ext_ack
*extack
)
1711 struct cls_fl_head
*head
= fl_head_dereference(tp
);
1712 struct cls_fl_filter
*f
= arg
;
1716 err
= __fl_delete(tp
, f
, &last_on_mask
, rtnl_held
, extack
);
1717 *last
= list_empty(&head
->masks
);
1723 static void fl_walk(struct tcf_proto
*tp
, struct tcf_walker
*arg
,
1726 struct cls_fl_head
*head
= fl_head_dereference(tp
);
1727 unsigned long id
= arg
->cookie
, tmp
;
1728 struct cls_fl_filter
*f
;
1730 arg
->count
= arg
->skip
;
1732 idr_for_each_entry_continue_ul(&head
->handle_idr
, f
, tmp
, id
) {
1733 /* don't return filters that are being deleted */
1734 if (!refcount_inc_not_zero(&f
->refcnt
))
1736 if (arg
->fn(tp
, f
, arg
) < 0) {
1747 static struct cls_fl_filter
*
1748 fl_get_next_hw_filter(struct tcf_proto
*tp
, struct cls_fl_filter
*f
, bool add
)
1750 struct cls_fl_head
*head
= fl_head_dereference(tp
);
1752 spin_lock(&tp
->lock
);
1753 if (list_empty(&head
->hw_filters
)) {
1754 spin_unlock(&tp
->lock
);
1759 f
= list_entry(&head
->hw_filters
, struct cls_fl_filter
,
1761 list_for_each_entry_continue(f
, &head
->hw_filters
, hw_list
) {
1762 if (!(add
&& f
->deleted
) && refcount_inc_not_zero(&f
->refcnt
)) {
1763 spin_unlock(&tp
->lock
);
1768 spin_unlock(&tp
->lock
);
1772 static int fl_reoffload(struct tcf_proto
*tp
, bool add
, flow_setup_cb_t
*cb
,
1773 void *cb_priv
, struct netlink_ext_ack
*extack
)
1775 struct tcf_block
*block
= tp
->chain
->block
;
1776 struct flow_cls_offload cls_flower
= {};
1777 struct cls_fl_filter
*f
= NULL
;
1780 /* hw_filters list can only be changed by hw offload functions after
1781 * obtaining rtnl lock. Make sure it is not changed while reoffload is
1786 while ((f
= fl_get_next_hw_filter(tp
, f
, add
))) {
1788 flow_rule_alloc(tcf_exts_num_actions(&f
->exts
));
1789 if (!cls_flower
.rule
) {
1794 tc_cls_common_offload_init(&cls_flower
.common
, tp
, f
->flags
,
1796 cls_flower
.command
= add
?
1797 FLOW_CLS_REPLACE
: FLOW_CLS_DESTROY
;
1798 cls_flower
.cookie
= (unsigned long)f
;
1799 cls_flower
.rule
->match
.dissector
= &f
->mask
->dissector
;
1800 cls_flower
.rule
->match
.mask
= &f
->mask
->key
;
1801 cls_flower
.rule
->match
.key
= &f
->mkey
;
1803 err
= tc_setup_flow_action(&cls_flower
.rule
->action
, &f
->exts
,
1806 kfree(cls_flower
.rule
);
1807 if (tc_skip_sw(f
->flags
)) {
1808 NL_SET_ERR_MSG_MOD(extack
, "Failed to setup flow action");
1815 cls_flower
.classid
= f
->res
.classid
;
1817 err
= tc_setup_cb_reoffload(block
, tp
, add
, cb
,
1818 TC_SETUP_CLSFLOWER
, &cls_flower
,
1821 tc_cleanup_flow_action(&cls_flower
.rule
->action
);
1822 kfree(cls_flower
.rule
);
1835 static void fl_hw_add(struct tcf_proto
*tp
, void *type_data
)
1837 struct flow_cls_offload
*cls_flower
= type_data
;
1838 struct cls_fl_filter
*f
=
1839 (struct cls_fl_filter
*) cls_flower
->cookie
;
1840 struct cls_fl_head
*head
= fl_head_dereference(tp
);
1842 spin_lock(&tp
->lock
);
1843 list_add(&f
->hw_list
, &head
->hw_filters
);
1844 spin_unlock(&tp
->lock
);
1847 static void fl_hw_del(struct tcf_proto
*tp
, void *type_data
)
1849 struct flow_cls_offload
*cls_flower
= type_data
;
1850 struct cls_fl_filter
*f
=
1851 (struct cls_fl_filter
*) cls_flower
->cookie
;
1853 spin_lock(&tp
->lock
);
1854 if (!list_empty(&f
->hw_list
))
1855 list_del_init(&f
->hw_list
);
1856 spin_unlock(&tp
->lock
);
1859 static int fl_hw_create_tmplt(struct tcf_chain
*chain
,
1860 struct fl_flow_tmplt
*tmplt
)
1862 struct flow_cls_offload cls_flower
= {};
1863 struct tcf_block
*block
= chain
->block
;
1865 cls_flower
.rule
= flow_rule_alloc(0);
1866 if (!cls_flower
.rule
)
1869 cls_flower
.common
.chain_index
= chain
->index
;
1870 cls_flower
.command
= FLOW_CLS_TMPLT_CREATE
;
1871 cls_flower
.cookie
= (unsigned long) tmplt
;
1872 cls_flower
.rule
->match
.dissector
= &tmplt
->dissector
;
1873 cls_flower
.rule
->match
.mask
= &tmplt
->mask
;
1874 cls_flower
.rule
->match
.key
= &tmplt
->dummy_key
;
1876 /* We don't care if driver (any of them) fails to handle this
1877 * call. It serves just as a hint for it.
1879 tc_setup_cb_call(block
, TC_SETUP_CLSFLOWER
, &cls_flower
, false, true);
1880 kfree(cls_flower
.rule
);
1885 static void fl_hw_destroy_tmplt(struct tcf_chain
*chain
,
1886 struct fl_flow_tmplt
*tmplt
)
1888 struct flow_cls_offload cls_flower
= {};
1889 struct tcf_block
*block
= chain
->block
;
1891 cls_flower
.common
.chain_index
= chain
->index
;
1892 cls_flower
.command
= FLOW_CLS_TMPLT_DESTROY
;
1893 cls_flower
.cookie
= (unsigned long) tmplt
;
1895 tc_setup_cb_call(block
, TC_SETUP_CLSFLOWER
, &cls_flower
, false, true);
1898 static void *fl_tmplt_create(struct net
*net
, struct tcf_chain
*chain
,
1899 struct nlattr
**tca
,
1900 struct netlink_ext_ack
*extack
)
1902 struct fl_flow_tmplt
*tmplt
;
1906 if (!tca
[TCA_OPTIONS
])
1907 return ERR_PTR(-EINVAL
);
1909 tb
= kcalloc(TCA_FLOWER_MAX
+ 1, sizeof(struct nlattr
*), GFP_KERNEL
);
1911 return ERR_PTR(-ENOBUFS
);
1912 err
= nla_parse_nested_deprecated(tb
, TCA_FLOWER_MAX
,
1913 tca
[TCA_OPTIONS
], fl_policy
, NULL
);
1917 tmplt
= kzalloc(sizeof(*tmplt
), GFP_KERNEL
);
1922 tmplt
->chain
= chain
;
1923 err
= fl_set_key(net
, tb
, &tmplt
->dummy_key
, &tmplt
->mask
, extack
);
1927 fl_init_dissector(&tmplt
->dissector
, &tmplt
->mask
);
1929 err
= fl_hw_create_tmplt(chain
, tmplt
);
1940 return ERR_PTR(err
);
1943 static void fl_tmplt_destroy(void *tmplt_priv
)
1945 struct fl_flow_tmplt
*tmplt
= tmplt_priv
;
1947 fl_hw_destroy_tmplt(tmplt
->chain
, tmplt
);
1951 static int fl_dump_key_val(struct sk_buff
*skb
,
1952 void *val
, int val_type
,
1953 void *mask
, int mask_type
, int len
)
1957 if (!memchr_inv(mask
, 0, len
))
1959 err
= nla_put(skb
, val_type
, len
, val
);
1962 if (mask_type
!= TCA_FLOWER_UNSPEC
) {
1963 err
= nla_put(skb
, mask_type
, len
, mask
);
1970 static int fl_dump_key_port_range(struct sk_buff
*skb
, struct fl_flow_key
*key
,
1971 struct fl_flow_key
*mask
)
1973 if (fl_dump_key_val(skb
, &key
->tp_min
.dst
, TCA_FLOWER_KEY_PORT_DST_MIN
,
1974 &mask
->tp_min
.dst
, TCA_FLOWER_UNSPEC
,
1975 sizeof(key
->tp_min
.dst
)) ||
1976 fl_dump_key_val(skb
, &key
->tp_max
.dst
, TCA_FLOWER_KEY_PORT_DST_MAX
,
1977 &mask
->tp_max
.dst
, TCA_FLOWER_UNSPEC
,
1978 sizeof(key
->tp_max
.dst
)) ||
1979 fl_dump_key_val(skb
, &key
->tp_min
.src
, TCA_FLOWER_KEY_PORT_SRC_MIN
,
1980 &mask
->tp_min
.src
, TCA_FLOWER_UNSPEC
,
1981 sizeof(key
->tp_min
.src
)) ||
1982 fl_dump_key_val(skb
, &key
->tp_max
.src
, TCA_FLOWER_KEY_PORT_SRC_MAX
,
1983 &mask
->tp_max
.src
, TCA_FLOWER_UNSPEC
,
1984 sizeof(key
->tp_max
.src
)))
1990 static int fl_dump_key_mpls(struct sk_buff
*skb
,
1991 struct flow_dissector_key_mpls
*mpls_key
,
1992 struct flow_dissector_key_mpls
*mpls_mask
)
1996 if (!memchr_inv(mpls_mask
, 0, sizeof(*mpls_mask
)))
1998 if (mpls_mask
->mpls_ttl
) {
1999 err
= nla_put_u8(skb
, TCA_FLOWER_KEY_MPLS_TTL
,
2000 mpls_key
->mpls_ttl
);
2004 if (mpls_mask
->mpls_tc
) {
2005 err
= nla_put_u8(skb
, TCA_FLOWER_KEY_MPLS_TC
,
2010 if (mpls_mask
->mpls_label
) {
2011 err
= nla_put_u32(skb
, TCA_FLOWER_KEY_MPLS_LABEL
,
2012 mpls_key
->mpls_label
);
2016 if (mpls_mask
->mpls_bos
) {
2017 err
= nla_put_u8(skb
, TCA_FLOWER_KEY_MPLS_BOS
,
2018 mpls_key
->mpls_bos
);
2025 static int fl_dump_key_ip(struct sk_buff
*skb
, bool encap
,
2026 struct flow_dissector_key_ip
*key
,
2027 struct flow_dissector_key_ip
*mask
)
2029 int tos_key
= encap
? TCA_FLOWER_KEY_ENC_IP_TOS
: TCA_FLOWER_KEY_IP_TOS
;
2030 int ttl_key
= encap
? TCA_FLOWER_KEY_ENC_IP_TTL
: TCA_FLOWER_KEY_IP_TTL
;
2031 int tos_mask
= encap
? TCA_FLOWER_KEY_ENC_IP_TOS_MASK
: TCA_FLOWER_KEY_IP_TOS_MASK
;
2032 int ttl_mask
= encap
? TCA_FLOWER_KEY_ENC_IP_TTL_MASK
: TCA_FLOWER_KEY_IP_TTL_MASK
;
2034 if (fl_dump_key_val(skb
, &key
->tos
, tos_key
, &mask
->tos
, tos_mask
, sizeof(key
->tos
)) ||
2035 fl_dump_key_val(skb
, &key
->ttl
, ttl_key
, &mask
->ttl
, ttl_mask
, sizeof(key
->ttl
)))
2041 static int fl_dump_key_vlan(struct sk_buff
*skb
,
2042 int vlan_id_key
, int vlan_prio_key
,
2043 struct flow_dissector_key_vlan
*vlan_key
,
2044 struct flow_dissector_key_vlan
*vlan_mask
)
2048 if (!memchr_inv(vlan_mask
, 0, sizeof(*vlan_mask
)))
2050 if (vlan_mask
->vlan_id
) {
2051 err
= nla_put_u16(skb
, vlan_id_key
,
2056 if (vlan_mask
->vlan_priority
) {
2057 err
= nla_put_u8(skb
, vlan_prio_key
,
2058 vlan_key
->vlan_priority
);
2065 static void fl_get_key_flag(u32 dissector_key
, u32 dissector_mask
,
2066 u32
*flower_key
, u32
*flower_mask
,
2067 u32 flower_flag_bit
, u32 dissector_flag_bit
)
2069 if (dissector_mask
& dissector_flag_bit
) {
2070 *flower_mask
|= flower_flag_bit
;
2071 if (dissector_key
& dissector_flag_bit
)
2072 *flower_key
|= flower_flag_bit
;
2076 static int fl_dump_key_flags(struct sk_buff
*skb
, u32 flags_key
, u32 flags_mask
)
2082 if (!memchr_inv(&flags_mask
, 0, sizeof(flags_mask
)))
2088 fl_get_key_flag(flags_key
, flags_mask
, &key
, &mask
,
2089 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT
, FLOW_DIS_IS_FRAGMENT
);
2090 fl_get_key_flag(flags_key
, flags_mask
, &key
, &mask
,
2091 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST
,
2092 FLOW_DIS_FIRST_FRAG
);
2094 _key
= cpu_to_be32(key
);
2095 _mask
= cpu_to_be32(mask
);
2097 err
= nla_put(skb
, TCA_FLOWER_KEY_FLAGS
, 4, &_key
);
2101 return nla_put(skb
, TCA_FLOWER_KEY_FLAGS_MASK
, 4, &_mask
);
2104 static int fl_dump_key_geneve_opt(struct sk_buff
*skb
,
2105 struct flow_dissector_key_enc_opts
*enc_opts
)
2107 struct geneve_opt
*opt
;
2108 struct nlattr
*nest
;
2111 nest
= nla_nest_start_noflag(skb
, TCA_FLOWER_KEY_ENC_OPTS_GENEVE
);
2113 goto nla_put_failure
;
2115 while (enc_opts
->len
> opt_off
) {
2116 opt
= (struct geneve_opt
*)&enc_opts
->data
[opt_off
];
2118 if (nla_put_be16(skb
, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS
,
2120 goto nla_put_failure
;
2121 if (nla_put_u8(skb
, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE
,
2123 goto nla_put_failure
;
2124 if (nla_put(skb
, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA
,
2125 opt
->length
* 4, opt
->opt_data
))
2126 goto nla_put_failure
;
2128 opt_off
+= sizeof(struct geneve_opt
) + opt
->length
* 4;
2130 nla_nest_end(skb
, nest
);
2134 nla_nest_cancel(skb
, nest
);
2138 static int fl_dump_key_ct(struct sk_buff
*skb
,
2139 struct flow_dissector_key_ct
*key
,
2140 struct flow_dissector_key_ct
*mask
)
2142 if (IS_ENABLED(CONFIG_NF_CONNTRACK
) &&
2143 fl_dump_key_val(skb
, &key
->ct_state
, TCA_FLOWER_KEY_CT_STATE
,
2144 &mask
->ct_state
, TCA_FLOWER_KEY_CT_STATE_MASK
,
2145 sizeof(key
->ct_state
)))
2146 goto nla_put_failure
;
2148 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES
) &&
2149 fl_dump_key_val(skb
, &key
->ct_zone
, TCA_FLOWER_KEY_CT_ZONE
,
2150 &mask
->ct_zone
, TCA_FLOWER_KEY_CT_ZONE_MASK
,
2151 sizeof(key
->ct_zone
)))
2152 goto nla_put_failure
;
2154 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK
) &&
2155 fl_dump_key_val(skb
, &key
->ct_mark
, TCA_FLOWER_KEY_CT_MARK
,
2156 &mask
->ct_mark
, TCA_FLOWER_KEY_CT_MARK_MASK
,
2157 sizeof(key
->ct_mark
)))
2158 goto nla_put_failure
;
2160 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS
) &&
2161 fl_dump_key_val(skb
, &key
->ct_labels
, TCA_FLOWER_KEY_CT_LABELS
,
2162 &mask
->ct_labels
, TCA_FLOWER_KEY_CT_LABELS_MASK
,
2163 sizeof(key
->ct_labels
)))
2164 goto nla_put_failure
;
2172 static int fl_dump_key_options(struct sk_buff
*skb
, int enc_opt_type
,
2173 struct flow_dissector_key_enc_opts
*enc_opts
)
2175 struct nlattr
*nest
;
2181 nest
= nla_nest_start_noflag(skb
, enc_opt_type
);
2183 goto nla_put_failure
;
2185 switch (enc_opts
->dst_opt_type
) {
2186 case TUNNEL_GENEVE_OPT
:
2187 err
= fl_dump_key_geneve_opt(skb
, enc_opts
);
2189 goto nla_put_failure
;
2192 goto nla_put_failure
;
2194 nla_nest_end(skb
, nest
);
2198 nla_nest_cancel(skb
, nest
);
2202 static int fl_dump_key_enc_opt(struct sk_buff
*skb
,
2203 struct flow_dissector_key_enc_opts
*key_opts
,
2204 struct flow_dissector_key_enc_opts
*msk_opts
)
2208 err
= fl_dump_key_options(skb
, TCA_FLOWER_KEY_ENC_OPTS
, key_opts
);
2212 return fl_dump_key_options(skb
, TCA_FLOWER_KEY_ENC_OPTS_MASK
, msk_opts
);
2215 static int fl_dump_key(struct sk_buff
*skb
, struct net
*net
,
2216 struct fl_flow_key
*key
, struct fl_flow_key
*mask
)
2218 if (mask
->meta
.ingress_ifindex
) {
2219 struct net_device
*dev
;
2221 dev
= __dev_get_by_index(net
, key
->meta
.ingress_ifindex
);
2222 if (dev
&& nla_put_string(skb
, TCA_FLOWER_INDEV
, dev
->name
))
2223 goto nla_put_failure
;
2226 if (fl_dump_key_val(skb
, key
->eth
.dst
, TCA_FLOWER_KEY_ETH_DST
,
2227 mask
->eth
.dst
, TCA_FLOWER_KEY_ETH_DST_MASK
,
2228 sizeof(key
->eth
.dst
)) ||
2229 fl_dump_key_val(skb
, key
->eth
.src
, TCA_FLOWER_KEY_ETH_SRC
,
2230 mask
->eth
.src
, TCA_FLOWER_KEY_ETH_SRC_MASK
,
2231 sizeof(key
->eth
.src
)) ||
2232 fl_dump_key_val(skb
, &key
->basic
.n_proto
, TCA_FLOWER_KEY_ETH_TYPE
,
2233 &mask
->basic
.n_proto
, TCA_FLOWER_UNSPEC
,
2234 sizeof(key
->basic
.n_proto
)))
2235 goto nla_put_failure
;
2237 if (fl_dump_key_mpls(skb
, &key
->mpls
, &mask
->mpls
))
2238 goto nla_put_failure
;
2240 if (fl_dump_key_vlan(skb
, TCA_FLOWER_KEY_VLAN_ID
,
2241 TCA_FLOWER_KEY_VLAN_PRIO
, &key
->vlan
, &mask
->vlan
))
2242 goto nla_put_failure
;
2244 if (fl_dump_key_vlan(skb
, TCA_FLOWER_KEY_CVLAN_ID
,
2245 TCA_FLOWER_KEY_CVLAN_PRIO
,
2246 &key
->cvlan
, &mask
->cvlan
) ||
2247 (mask
->cvlan
.vlan_tpid
&&
2248 nla_put_be16(skb
, TCA_FLOWER_KEY_VLAN_ETH_TYPE
,
2249 key
->cvlan
.vlan_tpid
)))
2250 goto nla_put_failure
;
2252 if (mask
->basic
.n_proto
) {
2253 if (mask
->cvlan
.vlan_tpid
) {
2254 if (nla_put_be16(skb
, TCA_FLOWER_KEY_CVLAN_ETH_TYPE
,
2255 key
->basic
.n_proto
))
2256 goto nla_put_failure
;
2257 } else if (mask
->vlan
.vlan_tpid
) {
2258 if (nla_put_be16(skb
, TCA_FLOWER_KEY_VLAN_ETH_TYPE
,
2259 key
->basic
.n_proto
))
2260 goto nla_put_failure
;
2264 if ((key
->basic
.n_proto
== htons(ETH_P_IP
) ||
2265 key
->basic
.n_proto
== htons(ETH_P_IPV6
)) &&
2266 (fl_dump_key_val(skb
, &key
->basic
.ip_proto
, TCA_FLOWER_KEY_IP_PROTO
,
2267 &mask
->basic
.ip_proto
, TCA_FLOWER_UNSPEC
,
2268 sizeof(key
->basic
.ip_proto
)) ||
2269 fl_dump_key_ip(skb
, false, &key
->ip
, &mask
->ip
)))
2270 goto nla_put_failure
;
2272 if (key
->control
.addr_type
== FLOW_DISSECTOR_KEY_IPV4_ADDRS
&&
2273 (fl_dump_key_val(skb
, &key
->ipv4
.src
, TCA_FLOWER_KEY_IPV4_SRC
,
2274 &mask
->ipv4
.src
, TCA_FLOWER_KEY_IPV4_SRC_MASK
,
2275 sizeof(key
->ipv4
.src
)) ||
2276 fl_dump_key_val(skb
, &key
->ipv4
.dst
, TCA_FLOWER_KEY_IPV4_DST
,
2277 &mask
->ipv4
.dst
, TCA_FLOWER_KEY_IPV4_DST_MASK
,
2278 sizeof(key
->ipv4
.dst
))))
2279 goto nla_put_failure
;
2280 else if (key
->control
.addr_type
== FLOW_DISSECTOR_KEY_IPV6_ADDRS
&&
2281 (fl_dump_key_val(skb
, &key
->ipv6
.src
, TCA_FLOWER_KEY_IPV6_SRC
,
2282 &mask
->ipv6
.src
, TCA_FLOWER_KEY_IPV6_SRC_MASK
,
2283 sizeof(key
->ipv6
.src
)) ||
2284 fl_dump_key_val(skb
, &key
->ipv6
.dst
, TCA_FLOWER_KEY_IPV6_DST
,
2285 &mask
->ipv6
.dst
, TCA_FLOWER_KEY_IPV6_DST_MASK
,
2286 sizeof(key
->ipv6
.dst
))))
2287 goto nla_put_failure
;
2289 if (key
->basic
.ip_proto
== IPPROTO_TCP
&&
2290 (fl_dump_key_val(skb
, &key
->tp
.src
, TCA_FLOWER_KEY_TCP_SRC
,
2291 &mask
->tp
.src
, TCA_FLOWER_KEY_TCP_SRC_MASK
,
2292 sizeof(key
->tp
.src
)) ||
2293 fl_dump_key_val(skb
, &key
->tp
.dst
, TCA_FLOWER_KEY_TCP_DST
,
2294 &mask
->tp
.dst
, TCA_FLOWER_KEY_TCP_DST_MASK
,
2295 sizeof(key
->tp
.dst
)) ||
2296 fl_dump_key_val(skb
, &key
->tcp
.flags
, TCA_FLOWER_KEY_TCP_FLAGS
,
2297 &mask
->tcp
.flags
, TCA_FLOWER_KEY_TCP_FLAGS_MASK
,
2298 sizeof(key
->tcp
.flags
))))
2299 goto nla_put_failure
;
2300 else if (key
->basic
.ip_proto
== IPPROTO_UDP
&&
2301 (fl_dump_key_val(skb
, &key
->tp
.src
, TCA_FLOWER_KEY_UDP_SRC
,
2302 &mask
->tp
.src
, TCA_FLOWER_KEY_UDP_SRC_MASK
,
2303 sizeof(key
->tp
.src
)) ||
2304 fl_dump_key_val(skb
, &key
->tp
.dst
, TCA_FLOWER_KEY_UDP_DST
,
2305 &mask
->tp
.dst
, TCA_FLOWER_KEY_UDP_DST_MASK
,
2306 sizeof(key
->tp
.dst
))))
2307 goto nla_put_failure
;
2308 else if (key
->basic
.ip_proto
== IPPROTO_SCTP
&&
2309 (fl_dump_key_val(skb
, &key
->tp
.src
, TCA_FLOWER_KEY_SCTP_SRC
,
2310 &mask
->tp
.src
, TCA_FLOWER_KEY_SCTP_SRC_MASK
,
2311 sizeof(key
->tp
.src
)) ||
2312 fl_dump_key_val(skb
, &key
->tp
.dst
, TCA_FLOWER_KEY_SCTP_DST
,
2313 &mask
->tp
.dst
, TCA_FLOWER_KEY_SCTP_DST_MASK
,
2314 sizeof(key
->tp
.dst
))))
2315 goto nla_put_failure
;
2316 else if (key
->basic
.n_proto
== htons(ETH_P_IP
) &&
2317 key
->basic
.ip_proto
== IPPROTO_ICMP
&&
2318 (fl_dump_key_val(skb
, &key
->icmp
.type
,
2319 TCA_FLOWER_KEY_ICMPV4_TYPE
, &mask
->icmp
.type
,
2320 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK
,
2321 sizeof(key
->icmp
.type
)) ||
2322 fl_dump_key_val(skb
, &key
->icmp
.code
,
2323 TCA_FLOWER_KEY_ICMPV4_CODE
, &mask
->icmp
.code
,
2324 TCA_FLOWER_KEY_ICMPV4_CODE_MASK
,
2325 sizeof(key
->icmp
.code
))))
2326 goto nla_put_failure
;
2327 else if (key
->basic
.n_proto
== htons(ETH_P_IPV6
) &&
2328 key
->basic
.ip_proto
== IPPROTO_ICMPV6
&&
2329 (fl_dump_key_val(skb
, &key
->icmp
.type
,
2330 TCA_FLOWER_KEY_ICMPV6_TYPE
, &mask
->icmp
.type
,
2331 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK
,
2332 sizeof(key
->icmp
.type
)) ||
2333 fl_dump_key_val(skb
, &key
->icmp
.code
,
2334 TCA_FLOWER_KEY_ICMPV6_CODE
, &mask
->icmp
.code
,
2335 TCA_FLOWER_KEY_ICMPV6_CODE_MASK
,
2336 sizeof(key
->icmp
.code
))))
2337 goto nla_put_failure
;
2338 else if ((key
->basic
.n_proto
== htons(ETH_P_ARP
) ||
2339 key
->basic
.n_proto
== htons(ETH_P_RARP
)) &&
2340 (fl_dump_key_val(skb
, &key
->arp
.sip
,
2341 TCA_FLOWER_KEY_ARP_SIP
, &mask
->arp
.sip
,
2342 TCA_FLOWER_KEY_ARP_SIP_MASK
,
2343 sizeof(key
->arp
.sip
)) ||
2344 fl_dump_key_val(skb
, &key
->arp
.tip
,
2345 TCA_FLOWER_KEY_ARP_TIP
, &mask
->arp
.tip
,
2346 TCA_FLOWER_KEY_ARP_TIP_MASK
,
2347 sizeof(key
->arp
.tip
)) ||
2348 fl_dump_key_val(skb
, &key
->arp
.op
,
2349 TCA_FLOWER_KEY_ARP_OP
, &mask
->arp
.op
,
2350 TCA_FLOWER_KEY_ARP_OP_MASK
,
2351 sizeof(key
->arp
.op
)) ||
2352 fl_dump_key_val(skb
, key
->arp
.sha
, TCA_FLOWER_KEY_ARP_SHA
,
2353 mask
->arp
.sha
, TCA_FLOWER_KEY_ARP_SHA_MASK
,
2354 sizeof(key
->arp
.sha
)) ||
2355 fl_dump_key_val(skb
, key
->arp
.tha
, TCA_FLOWER_KEY_ARP_THA
,
2356 mask
->arp
.tha
, TCA_FLOWER_KEY_ARP_THA_MASK
,
2357 sizeof(key
->arp
.tha
))))
2358 goto nla_put_failure
;
2360 if ((key
->basic
.ip_proto
== IPPROTO_TCP
||
2361 key
->basic
.ip_proto
== IPPROTO_UDP
||
2362 key
->basic
.ip_proto
== IPPROTO_SCTP
) &&
2363 fl_dump_key_port_range(skb
, key
, mask
))
2364 goto nla_put_failure
;
2366 if (key
->enc_control
.addr_type
== FLOW_DISSECTOR_KEY_IPV4_ADDRS
&&
2367 (fl_dump_key_val(skb
, &key
->enc_ipv4
.src
,
2368 TCA_FLOWER_KEY_ENC_IPV4_SRC
, &mask
->enc_ipv4
.src
,
2369 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK
,
2370 sizeof(key
->enc_ipv4
.src
)) ||
2371 fl_dump_key_val(skb
, &key
->enc_ipv4
.dst
,
2372 TCA_FLOWER_KEY_ENC_IPV4_DST
, &mask
->enc_ipv4
.dst
,
2373 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK
,
2374 sizeof(key
->enc_ipv4
.dst
))))
2375 goto nla_put_failure
;
2376 else if (key
->enc_control
.addr_type
== FLOW_DISSECTOR_KEY_IPV6_ADDRS
&&
2377 (fl_dump_key_val(skb
, &key
->enc_ipv6
.src
,
2378 TCA_FLOWER_KEY_ENC_IPV6_SRC
, &mask
->enc_ipv6
.src
,
2379 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK
,
2380 sizeof(key
->enc_ipv6
.src
)) ||
2381 fl_dump_key_val(skb
, &key
->enc_ipv6
.dst
,
2382 TCA_FLOWER_KEY_ENC_IPV6_DST
,
2383 &mask
->enc_ipv6
.dst
,
2384 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK
,
2385 sizeof(key
->enc_ipv6
.dst
))))
2386 goto nla_put_failure
;
2388 if (fl_dump_key_val(skb
, &key
->enc_key_id
, TCA_FLOWER_KEY_ENC_KEY_ID
,
2389 &mask
->enc_key_id
, TCA_FLOWER_UNSPEC
,
2390 sizeof(key
->enc_key_id
)) ||
2391 fl_dump_key_val(skb
, &key
->enc_tp
.src
,
2392 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT
,
2394 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK
,
2395 sizeof(key
->enc_tp
.src
)) ||
2396 fl_dump_key_val(skb
, &key
->enc_tp
.dst
,
2397 TCA_FLOWER_KEY_ENC_UDP_DST_PORT
,
2399 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK
,
2400 sizeof(key
->enc_tp
.dst
)) ||
2401 fl_dump_key_ip(skb
, true, &key
->enc_ip
, &mask
->enc_ip
) ||
2402 fl_dump_key_enc_opt(skb
, &key
->enc_opts
, &mask
->enc_opts
))
2403 goto nla_put_failure
;
2405 if (fl_dump_key_ct(skb
, &key
->ct
, &mask
->ct
))
2406 goto nla_put_failure
;
2408 if (fl_dump_key_flags(skb
, key
->control
.flags
, mask
->control
.flags
))
2409 goto nla_put_failure
;
/* Dump one flower filter to a netlink skb (->dump of tcf_proto_ops).
 * Emits a TCA_OPTIONS nest holding the classid, flow key/mask, flags,
 * hardware-offload count, and the filter's actions/stats.
 *
 * Returns skb->len on success, -1 on nla_put_failure (message truncated;
 * the partially-built nest is cancelled).
 *
 * NOTE(review): several physical lines were missing from the extracted
 * source ('{', 'bool skip_hw;', the !f early return, '!nest' check,
 * 'key = &f->key;', '!skip_hw' guard, returns and labels); they were
 * restored from the line-number gaps — verify against the tree.
 */
static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;
	bool skip_hw;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	/* tp->lock protects f->res and f->flags against concurrent change;
	 * everything read under it must unwind through
	 * nla_put_failure_locked so the lock is dropped on error.
	 */
	spin_lock(&tp->lock);

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure_locked;

	key = &f->key;
	mask = &f->mask->key;
	skip_hw = tc_skip_hw(f->flags);

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure_locked;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure_locked;

	spin_unlock(&tp->lock);

	/* Refresh stats from the hardware only when the filter is actually
	 * offloaded; done outside tp->lock since it may sleep/take rtnl.
	 */
	if (!skip_hw)
		fl_hw_update_stats(tp, f, rtnl_held);

	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure_locked:
	spin_unlock(&tp->lock);
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
/* Dump a flower template (->tmplt_dump of tcf_proto_ops): the template's
 * dummy key and mask inside a TCA_OPTIONS nest.
 *
 * Returns skb->len on success, -1 on failure (nest cancelled).
 *
 * NOTE(review): the '!nest' check, 'return skb->len;', label and
 * 'return -1;' lines were missing from the extracted source and restored
 * from the line-number gaps — verify against the tree.
 */
static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;
	struct fl_flow_key *key, *mask;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	key = &tmplt->dummy_key;
	mask = &tmplt->mask;

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
/* ->bind_class of tcf_proto_ops: re-bind the filter's cached class when
 * the qdisc class identified by @classid changes.
 *
 * NOTE(review): the assignment body was missing from the extracted
 * source; 'f->res.class = cl;' restored per upstream — verify against
 * the tree.
 */
static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_fl_filter *f = fh;

	if (f && f->res.classid == classid)
		f->res.class = cl;
}
/* Classifier ops table registered with the tc core.
 * TCF_PROTO_OPS_DOIT_UNLOCKED: the core may call change/delete/dump
 * without holding rtnl (rtnl_held tells the callbacks).
 *
 * NOTE(review): the .kind, .init, .get, .put, .walk and .dump entries
 * were missing from the extracted source (line-number gaps 2509, 2511,
 * 2513-2514, 2517, 2521) and were restored from upstream — verify
 * against the tree.
 */
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.put		= fl_put,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.reoffload	= fl_reoffload,
	.hw_add		= fl_hw_add,
	.hw_del		= fl_hw_del,
	.dump		= fl_dump,
	.bind_class	= fl_bind_class,
	.tmplt_create	= fl_tmplt_create,
	.tmplt_destroy	= fl_tmplt_destroy,
	.tmplt_dump	= fl_tmplt_dump,
	.owner		= THIS_MODULE,
	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
};
/* Module init: register the flower classifier with the tc core. */
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}
/* Module exit: unregister the flower classifier. */
static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}
/* Standard module registration and metadata. */
module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");