/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident);
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident);
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident);

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
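
/* A minimal usage sketch (hypothetical classifier; TCA_FOO_CLASSID is an
 * assumed attribute): a classifier typically binds its result to a class
 * while parsing netlink attributes in its ->change() hook:
 *
 *	if (tb[TCA_FOO_CLASSID]) {
 *		res->classid = nla_get_u32(tb[TCA_FOO_CLASSID]);
 *		tcf_bind_filter(tp, res, base);
 *	}
 *
 * and drops the binding with tcf_unbind_filter() when the filter is
 * deleted or replaced.
 */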

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
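
/* A minimal sketch (hypothetical classifier; TCA_FOO_ACT and TCA_FOO_POLICE
 * are assumed TLV types): a classifier maps its own extension attributes to
 * the generic API when initializing its extensions:
 *
 *	struct tcf_exts e;
 *	int err;
 *
 *	err = tcf_exts_init(&e, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *
 * Extensions the classifier does not support are passed as 0.
 */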

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race
 * with tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
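
/* A minimal sketch of the intended pattern (the filter layout and
 * fl_destroy_filter_work are hypothetical): when a filter is torn down,
 * take a netns reference before deferring the cleanup, and fall back to
 * synchronous cleanup if the netns is already going away:
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
 *	else
 *		__fl_destroy_filter(f);
 *
 * The deferred work must call tcf_exts_put_net(&f->exts) before it frees
 * the extensions.
 */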

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
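
/* A minimal sketch of the classify-path usage (hypothetical classifier;
 * the filter layout is assumed): once a filter has matched, run its
 * extensions and propagate the verdict:
 *
 *	*res = f->res;
 *	err = tcf_exts_exec(skb, &f->exts, res);
 *	if (err < 0)
 *		continue;	(treat this filter as unmatched)
 *	return err;		(TC_ACT_OK or another TC_ACT_* code)
 */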

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *ptr;
	int nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops *ops;
	unsigned long data;
	unsigned int datalen;
	u16 matchid;
	u16 flags;
	struct net *net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int kind;
	int datalen;
	int (*change)(struct net *net, void *,
		      int, struct tcf_ematch *);
	int (*match)(struct sk_buff *, struct tcf_ematch *,
		     struct tcf_pkt_info *);
	void (*destroy)(struct tcf_ematch *);
	int (*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module *owner;
	struct list_head link;
};
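
/* A minimal sketch of an ematch module (em_foo, TCF_EM_FOO and struct
 * foo_cfg are all hypothetical): the module fills in tcf_ematch_ops and
 * registers it from its init routine:
 *
 *	static int em_foo_match(struct sk_buff *skb, struct tcf_ematch *em,
 *				struct tcf_pkt_info *info)
 *	{
 *		const struct foo_cfg *cfg = (const struct foo_cfg *)em->data;
 *
 *		return foo_matches(skb, cfg);	(must return 1 or 0)
 *	}
 *
 *	static struct tcf_ematch_ops em_foo_ops = {
 *		.kind		= TCF_EM_FOO,
 *		.datalen	= sizeof(struct foo_cfg),
 *		.match		= em_foo_match,
 *		.owner		= THIS_MODULE,
 *		.link		= LIST_HEAD_INIT(em_foo_ops.link),
 *	};
 *
 *	return tcf_em_register(&em_foo_ops);
 *
 * and calls tcf_em_unregister(&em_foo_ops) on module exit.
 */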

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations, returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured, or if ematch support is not enabled in the kernel;
 * otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
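
/* A minimal sketch of the intended use (hypothetical classifier with an
 * assumed TCA_FOO_EMATCHES attribute): the tree is validated while the
 * filter is configured and consulted on the fast path:
 *
 *	err = tcf_em_tree_validate(tp, tb[TCA_FOO_EMATCHES], &f->ematches);
 *	if (err < 0)
 *		return err;
 *	...
 *	if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *		continue;	(this filter does not match the packet)
 *
 * tcf_em_tree_destroy() releases the tree when the filter goes away.
 */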

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char *tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
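
/* A minimal sketch of the intended driver-side use (foo_setup_cls_flower
 * and struct foo_priv are hypothetical): drivers that only offload chain 0
 * gate their setup callback with this helper so the rejection is reported
 * through extack:
 *
 *	static int foo_setup_cls_flower(struct foo_priv *priv,
 *					struct flow_cls_offload *f)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
 *			return -EOPNOTSUPP;
 *		...
 *	}
 */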

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};
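
/* A short note on the layout above (the snippet is a hypothetical driver
 * callback): because @qopt is the first member, a pointer to
 * tc_mqprio_qopt_offload can also be read through the legacy
 * struct tc_mqprio_qopt layout, which is presumably why the comment
 * requires it to stay first:
 *
 *	struct tc_mqprio_qopt_offload *mqprio = type_data;
 *	struct tc_mqprio_qopt *qopt = &mqprio->qopt;	(same address)
 */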

/* This structure holds the cookie that is passed from user space to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8 *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* In case that a prio qdisc is offloaded and now is changed to a
	 * non-offloadable config, it needs to update the backlog & qlen
	 * values to negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

#endif