/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)
/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);
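
/* Example (illustrative sketch, not part of the upstream header): a classful
 * qdisc typically acquires its filter block in ->init() and releases it in
 * ->destroy().  'struct my_sched_data' and the two callbacks below are
 * hypothetical names used only for illustration.
 */
#if 0	/* example only, not compiled */
struct my_sched_data {
	struct tcf_block *block;
	struct tcf_proto __rcu *filter_list;
};

static int my_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct my_sched_data *q = qdisc_priv(sch);

	/* ties the block to this qdisc and exposes its filter chain */
	return tcf_block_get(&q->block, &q->filter_list, sch, extack);
}

static void my_qdisc_destroy(struct Qdisc *sch)
{
	struct my_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
}
#endif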
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident);
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident);
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
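
/* Example (illustrative sketch): classifying a packet on the enqueue path and
 * mapping the result to a band/minor class id, roughly what multi-band qdiscs
 * do.  'my_enqueue_classify' is a hypothetical helper.
 */
#if 0	/* example only, not compiled */
static int my_enqueue_classify(struct sk_buff *skb, struct tcf_proto *filter)
{
	struct tcf_result res;
	int result;

	if (!filter)
		return 0;	/* no filters attached, use the default band */

	result = tcf_classify(skb, filter, &res, false);
	if (result < 0)
		return 0;	/* no filter matched */

	switch (result) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return -1;	/* packet was dropped or consumed by an action */
	default:
		return TC_H_MIN(res.classid);
	}
}
#endif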
#else /* CONFIG_NET_CLS */

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}
static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}
static inline
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif /* CONFIG_NET_CLS */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}
static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net	*net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
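
/* Example (illustrative sketch): the deferred-destroy pattern most classifiers
 * use.  If the netns is already being dismantled, destroy synchronously;
 * otherwise hold a netns reference and defer the RCU-delayed work via
 * tcf_queue_work().  'struct my_filter' and the helpers are hypothetical.
 */
#if 0	/* example only, not compiled */
struct my_filter {
	struct tcf_exts exts;
	struct rcu_work rwork;
};

static void __my_filter_destroy(struct my_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);	/* drops the reference taken by tcf_exts_get_net(), if any */
	kfree(f);
}

static void my_filter_destroy_work(struct work_struct *work)
{
	struct my_filter *f = container_of(to_rcu_work(work),
					   struct my_filter, rwork);

	rtnl_lock();
	__my_filter_destroy(f);
	rtnl_unlock();
}

static void my_filter_delete(struct my_filter *f)
{
	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, my_filter_destroy_work);
	else
		__my_filter_destroy(f);
}
#endif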
#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
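
/* Example (illustrative sketch): walking the actions attached to a filter's
 * extensions with tcf_exts_for_each_action(); under !CONFIG_NET_CLS_ACT the
 * loop body simply never runs.  'my_count_actions' is a hypothetical helper.
 */
#if 0	/* example only, not compiled */
static unsigned int my_count_actions(struct tcf_exts *exts)
{
	struct tc_action *a;
	unsigned int n = 0;
	int i;

	tcf_exts_for_each_action(i, a, exts)
		n++;

	return n;
}
#endif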
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}
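
/* Example (illustrative sketch): pushing hardware counters gathered by a
 * driver back into the software actions, as classifiers do after a *_STATS
 * offload command.  'my_stats_update' is a hypothetical helper; the field
 * names follow struct flow_stats from <net/flow_offload.h>.
 */
#if 0	/* example only, not compiled */
static void my_stats_update(struct tcf_exts *exts,
			    const struct flow_stats *hw_stats)
{
	tcf_exts_stats_update(exts, hw_stats->bytes, hw_stats->pkts,
			      hw_stats->lastused);
}
#endif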
/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}
/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
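
/* Example (illustrative sketch): the usual shape of a classifier's per-filter
 * match path.  A negative return from tcf_exts_exec() means "not matched,
 * try the next filter"; anything else is handed back to tcf_classify().
 * 'struct my_filter' (with 'res' and 'exts' members) is hypothetical.
 */
#if 0	/* example only, not compiled */
static int my_classify_one(struct sk_buff *skb, struct my_filter *f,
			   struct tcf_result *res)
{
	int r;

	*res = f->res;
	r = tcf_exts_exec(skb, &f->exts, res);
	if (r < 0)
		return -1;	/* unmatched, caller moves on to the next filter */

	return r;
}
#endif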
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;
/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};
static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}
/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};
/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};
int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);
/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are configured
 * or ematch is not enabled in the kernel, otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))
#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */
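
/* Example (illustrative sketch): how a classifier wires up ematches.  The
 * tree is parsed from a netlink attribute in ->change() and consulted per
 * packet before the filter's actions run.  'struct my_filter' (with an
 * 'ematches' member) and the TCA_MY_EMATCHES attribute are hypothetical.
 */
#if 0	/* example only, not compiled */
static int my_set_ematches(struct tcf_proto *tp, struct nlattr **tb,
			   struct my_filter *f)
{
	return tcf_em_tree_validate(tp, tb[TCA_MY_EMATCHES], &f->ematches);
}

static bool my_filter_matches(struct sk_buff *skb, struct my_filter *f)
{
	/* returns 1 when the tree matches or no ematches are configured */
	return tcf_em_tree_match(skb, &f->ematches, NULL);
}
#endif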
static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
		case TCF_LAYER_LINK:
			return skb_mac_header(skb);
		case TCF_LAYER_NETWORK:
			return skb_network_header(skb);
		case TCF_LAYER_TRANSPORT:
			return skb_transport_header(skb);
	}

	return NULL;
}
static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}
static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
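
/* Example (illustrative sketch): resolving an "indev" attribute to an ifindex
 * at configuration time and testing it per packet, as cls_u32/cls_fw do.
 * 'my_set_indev'/'my_indev_matches' and TCA_MY_INDEV are hypothetical.
 */
#if 0	/* example only, not compiled */
static int my_set_indev(struct net *net, struct nlattr **tb, int *ifindex,
			struct netlink_ext_ack *extack)
{
	int ret;

	if (!tb[TCA_MY_INDEV])
		return 0;

	ret = tcf_change_indev(net, tb[TCA_MY_INDEV], extack);
	if (ret < 0)
		return ret;

	*ifindex = ret;
	return 0;
}

static bool my_indev_matches(struct sk_buff *skb, int ifindex)
{
	return tcf_match_indev(skb, ifindex);
}
#endif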
int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};
static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}

	return true;
}
static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}
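
/* Example (illustrative sketch): how a classifier's ->change() typically
 * consumes the user-supplied TCA_CLS_FLAGS_* bits.  'struct my_filter' with a
 * 'flags' member and the hardware-offload step are hypothetical.
 */
#if 0	/* example only, not compiled */
static int my_validate_flags(struct my_filter *f, u32 flags)
{
	if (!tc_flags_valid(flags))
		return -EINVAL;	/* e.g. both SKIP_SW and SKIP_HW were set */

	if (!tc_skip_hw(flags)) {
		/* attempt hardware offload here; on success the offload path
		 * sets TCA_CLS_FLAGS_IN_HW in f->flags
		 */
	}

	if (tc_skip_sw(flags) && !tc_in_hw(f->flags))
		return -EINVAL;	/* neither software nor hardware would match */

	return 0;
}
#endif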
static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}
enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};
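
/* Example (illustrative sketch): issuing a matchall offload request to the
 * drivers bound to a block, loosely following the cls_matchall pattern.
 * 'struct my_filter' (with 'exts' and 'flags'), 'my_mall_replace_hw' and
 * 'cookie' are hypothetical; error handling is simplified.
 */
#if 0	/* example only, not compiled */
static int my_mall_replace_hw(struct tcf_proto *tp, struct my_filter *f,
			      unsigned long cookie,
			      struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_matchall_offload cls_mall = {};
	int err;

	tc_cls_common_offload_init(&cls_mall.common, tp, f->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_REPLACE;
	cls_mall.cookie = cookie;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	err = tc_setup_flow_action(&cls_mall.rule->action, &f->exts);
	if (err) {
		kfree(cls_mall.rule);
		return err;
	}

	err = tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall,
			       tc_skip_sw(f->flags));
	kfree(cls_mall.rule);

	/* a positive return is the number of drivers that accepted the rule */
	return err < 0 ? err : 0;
}
#endif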
enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};
struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};
/* This structure holds cookie structure that is passed from user
 * to the kernel for actions and classifiers
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};
enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};
enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};
enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};
enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* In case that a prio qdisc is offloaded and now is changed to a
	 * non-offloaded config, it needs to update the backlog & qlen values
	 * to negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};
enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

#endif /* __NET_PKT_CLS_H */