/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */
struct tcf_walker {
	/* ... */
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);
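
/*
 * Illustrative sketch (not part of the original header): a classful qdisc
 * acquiring its filter block in ->init() and releasing it in ->destroy().
 * "struct foo_sched_data" and the foo_* callbacks are hypothetical names.
 */
#if 0
struct foo_sched_data {
	struct tcf_block *block;
	struct tcf_proto __rcu *filter_list;
};

static int foo_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct foo_sched_data *q = qdisc_priv(sch);

	/* Ties the block to this qdisc and its filter chain pointer. */
	return tcf_block_get(&q->block, &q->filter_list, sch, extack);
}

static void foo_destroy(struct Qdisc *sch)
{
	struct foo_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
}
#endif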
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
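
/*
 * Illustrative sketch (not part of the original header): classifying a packet
 * from a qdisc enqueue path. foo_classify() and the private data layout are
 * hypothetical; classful qdiscs follow this general pattern.
 */
#if 0
static int foo_classify(struct sk_buff *skb, struct Qdisc *sch)
{
	struct foo_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
	struct tcf_result res;

	if (!fl)
		return -1;	/* no filters attached, use the default class */

	switch (tcf_classify(skb, fl, &res, false)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return -1;	/* dropped or consumed by actions */
	}

	return TC_H_MIN(res.classid);	/* minor id selects the class */
}
#endif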
#else /* CONFIG_NET_CLS */

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif /* CONFIG_NET_CLS */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);

	return old_cl;
}
static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
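
/*
 * Illustrative sketch (not part of the original header): how a classifier
 * binds and unbinds the class selected by its tcf_result. "struct foo_filter"
 * and the foo_* helpers are hypothetical.
 */
#if 0
static void foo_set_classid(struct tcf_proto *tp, struct foo_filter *f,
			    unsigned long base, u32 classid)
{
	f->res.classid = classid;
	tcf_bind_filter(tp, &f->res, base);	/* take a reference on the class */
}

static void foo_del_filter(struct tcf_proto *tp, struct foo_filter *f)
{
	tcf_unbind_filter(tp, &f->res);		/* release the class reference */
}
#endif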
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net	*net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};
static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
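
/*
 * Illustrative sketch (not part of the original header): iterating the
 * actions attached to a filter. foo_has_drop_action() is hypothetical;
 * is_tcf_gact_shot() comes from <net/tc_act/tc_gact.h>.
 */
#if 0
static bool foo_has_drop_action(const struct tcf_exts *exts)
{
	struct tc_action *a;
	int i;

	tcf_exts_for_each_action(i, a, exts) {
		if (is_tcf_gact_shot(a))
			return true;
	}
	return false;
}
#endif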
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}
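
/*
 * Illustrative sketch (not part of the original header): a driver folding the
 * hardware counters of an offloaded rule back into the software actions.
 * "struct foo_flow" and its fields are hypothetical.
 */
#if 0
static void foo_flow_stats_refresh(struct foo_flow *flow,
				   struct tcf_exts *exts)
{
	tcf_exts_stats_update(exts, flow->hw_bytes, flow->hw_packets,
			      flow->hw_lastuse /* in jiffies */);
}
#endif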
/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}
/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
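
/*
 * Illustrative sketch (not part of the original header): the usual life cycle
 * of tcf_exts in a classifier's ->change() path. The foo_* names and the
 * TCA_FOO_* attributes are hypothetical; the init -> validate -> destroy
 * sequence is the common pattern.
 */
#if 0
static int foo_set_parms(struct net *net, struct tcf_proto *tp,
			 struct foo_filter *f, struct nlattr **tb,
			 struct nlattr *est, bool ovr,
			 struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_init(&f->exts, net, TCA_FOO_ACT, TCA_FOO_POLICE);
	if (err < 0)
		return err;

	/* Parses the action/police attributes and binds the actions. */
	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true, extack);
	if (err < 0)
		tcf_exts_destroy(&f->exts);
	return err;
}
#endif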
/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;
/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};
static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}
/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};
/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};
int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);
/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as-one matches, no ematches are configured
 * or ematch is not enabled in the kernel, otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))
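
/*
 * Illustrative sketch (not part of the original header): the typical ematch
 * usage in a classifier. TCA_FOO_EMATCHES and the foo_* names are
 * hypothetical; the validate/match/destroy sequence is the standard one.
 */
#if 0
static int foo_parse_ematches(struct tcf_proto *tp, struct nlattr **tb,
			      struct foo_filter *f)
{
	return tcf_em_tree_validate(tp, tb[TCA_FOO_EMATCHES], &f->ematches);
}

static int foo_filter_match(struct sk_buff *skb, struct foo_filter *f)
{
	/* A NULL packet info is accepted here. */
	return tcf_em_tree_match(skb, &f->ematches, NULL);
}

static void foo_filter_free(struct foo_filter *f)
{
	tcf_em_tree_destroy(&f->ematches);
}
#endif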
#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */
static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
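
/*
 * Illustrative sketch (not part of the original header): restricting a filter
 * to one ingress device. TCA_FOO_INDEV and the foo_* names are hypothetical.
 */
#if 0
static int foo_parse_indev(struct net *net, struct nlattr **tb,
			   struct foo_filter *f,
			   struct netlink_ext_ack *extack)
{
	int ifindex;

	if (!tb[TCA_FOO_INDEV])
		return 0;

	ifindex = tcf_change_indev(net, tb[TCA_FOO_INDEV], extack);
	if (ifindex < 0)
		return ifindex;

	f->ifindex = ifindex;	/* checked later at classification time */
	return 0;
}

static bool foo_indev_ok(const struct foo_filter *f, struct sk_buff *skb)
{
	return tcf_match_indev(skb, f->ifindex);
}
#endif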
int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts, bool rtnl_held);
void tc_cleanup_flow_action(struct flow_action *flow_action);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
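
/*
 * Illustrative sketch (not part of the original header): offloading a filter
 * through the block callback API. TC_SETUP_CLSFOO, TC_CLSFOO_REPLACE and
 * "struct tc_cls_foo_offload" are hypothetical stand-ins for a classifier's
 * own offload type; the call shape mirrors how classifiers drive
 * tc_setup_cb_add().
 */
#if 0
static int foo_hw_replace(struct tcf_proto *tp, struct foo_filter *f,
			  struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_foo_offload cls_foo = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err;

	tc_cls_common_offload_init(&cls_foo.common, tp, f->flags, extack);
	cls_foo.command = TC_CLSFOO_REPLACE;
	cls_foo.cookie = (unsigned long)f;

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFOO, &cls_foo, skip_sw,
			      &f->flags, &f->in_hw_count, true);
	if (err)
		return err;

	/* skip_sw rules must actually make it into hardware. */
	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;
	return 0;
}
#endif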
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	/* ... */
};

struct tc_cls_u32_hnode {
	/* ... */
	unsigned int divisor;
};
enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};
static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}
static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}
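
/*
 * Illustrative sketch (not part of the original header): validating user
 * supplied TCA_CLS_FLAGS_* before using them. TCA_FOO_FLAGS is hypothetical.
 */
#if 0
static int foo_parse_flags(struct nlattr **tb, u32 *flags)
{
	if (!tb[TCA_FOO_FLAGS])
		return 0;

	*flags = nla_get_u32(tb[TCA_FOO_FLAGS]);
	if (!tc_flags_valid(*flags))
		return -EINVAL;	/* e.g. skip_sw and skip_hw both set */
	return 0;
}
#endif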
static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}
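
/*
 * Illustrative sketch (not part of the original header): the driver side of a
 * flow block callback rejecting what it cannot offload. The foo_* names are
 * hypothetical; struct flow_cls_offload comes from <net/flow_offload.h>.
 */
#if 0
static int foo_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct foo_priv *priv = cb_priv;
	struct flow_cls_offload *f = type_data;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	/* Only chain 0 is supported by this hypothetical driver. */
	if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
		return -EOPNOTSUPP;

	return foo_flower_rule(priv, f);
}
#endif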
enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};
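
/*
 * Illustrative sketch (not part of the original header): handling matchall
 * offload commands in a driver. foo_install_rule()/foo_remove_rule() are
 * hypothetical helpers keyed by the rule cookie.
 */
#if 0
static int foo_setup_matchall(struct foo_priv *priv,
			      struct tc_cls_matchall_offload *f)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return foo_install_rule(priv, f->rule, f->cookie);
	case TC_CLSMATCHALL_DESTROY:
		foo_remove_rule(priv, f->cookie);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif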
enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};
struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	/* ... */
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};
/* This structure holds cookie structure that is passed from user
 * to the kernel for actions and classifiers
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};
struct tc_mq_opt_offload_graft_params {
	/* ... */
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	/* ... */
	struct tc_qopt_offload_stats stats;
	struct tc_mq_opt_offload_graft_params graft_params;
};
enum tc_red_command {
	/* ... */
};

struct tc_red_qopt_offload_params {
	/* ... */
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	/* ... */
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		/* ... */
	};
};
enum tc_gred_command {
	/* ... */
};

struct tc_gred_vq_qopt_offload_params {
	/* ... */
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	/* ... */
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	/* ... */
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};
enum tc_prio_command {
	/* ... */
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* In case that a prio qdisc is offloaded and now is changed to a
	 * non-offloadable config, it needs to update the backlog & qlen
	 * values to negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	/* ... */
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	/* ... */
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};
enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	/* ... */
};