1 /* SPDX-License-Identifier: GPL-2.0 */
2 #include <linux/init.h>
3 #include <linux/module.h>
4 #include <linux/netfilter.h>
5 #include <net/flow_offload.h>
6 #include <net/netfilter/nf_tables.h>
7 #include <net/netfilter/nf_tables_offload.h>
8 #include <net/pkt_cls.h>
/* Per-netns id used with net_generic() to reach struct nftables_pernet. */
extern unsigned int nf_tables_net_id;
12 static struct nft_flow_rule
*nft_flow_rule_alloc(int num_actions
)
14 struct nft_flow_rule
*flow
;
16 flow
= kzalloc(sizeof(struct nft_flow_rule
), GFP_KERNEL
);
20 flow
->rule
= flow_rule_alloc(num_actions
);
26 flow
->rule
->match
.dissector
= &flow
->match
.dissector
;
27 flow
->rule
->match
.mask
= &flow
->match
.mask
;
28 flow
->rule
->match
.key
= &flow
->match
.key
;
33 void nft_flow_rule_set_addr_type(struct nft_flow_rule
*flow
,
34 enum flow_dissector_key_id addr_type
)
36 struct nft_flow_match
*match
= &flow
->match
;
37 struct nft_flow_key
*mask
= &match
->mask
;
38 struct nft_flow_key
*key
= &match
->key
;
40 if (match
->dissector
.used_keys
& BIT(FLOW_DISSECTOR_KEY_CONTROL
))
43 key
->control
.addr_type
= addr_type
;
44 mask
->control
.addr_type
= 0xffff;
45 match
->dissector
.used_keys
|= BIT(FLOW_DISSECTOR_KEY_CONTROL
);
46 match
->dissector
.offset
[FLOW_DISSECTOR_KEY_CONTROL
] =
47 offsetof(struct nft_flow_key
, control
);
50 struct nft_offload_ethertype
{
55 static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx
*ctx
,
56 struct nft_flow_rule
*flow
)
58 struct nft_flow_match
*match
= &flow
->match
;
59 struct nft_offload_ethertype ethertype
;
61 if (match
->dissector
.used_keys
& BIT(FLOW_DISSECTOR_KEY_CONTROL
) &&
62 match
->key
.basic
.n_proto
!= htons(ETH_P_8021Q
) &&
63 match
->key
.basic
.n_proto
!= htons(ETH_P_8021AD
))
66 ethertype
.value
= match
->key
.basic
.n_proto
;
67 ethertype
.mask
= match
->mask
.basic
.n_proto
;
69 if (match
->dissector
.used_keys
& BIT(FLOW_DISSECTOR_KEY_VLAN
) &&
70 (match
->key
.vlan
.vlan_tpid
== htons(ETH_P_8021Q
) ||
71 match
->key
.vlan
.vlan_tpid
== htons(ETH_P_8021AD
))) {
72 match
->key
.basic
.n_proto
= match
->key
.cvlan
.vlan_tpid
;
73 match
->mask
.basic
.n_proto
= match
->mask
.cvlan
.vlan_tpid
;
74 match
->key
.cvlan
.vlan_tpid
= match
->key
.vlan
.vlan_tpid
;
75 match
->mask
.cvlan
.vlan_tpid
= match
->mask
.vlan
.vlan_tpid
;
76 match
->key
.vlan
.vlan_tpid
= ethertype
.value
;
77 match
->mask
.vlan
.vlan_tpid
= ethertype
.mask
;
78 match
->dissector
.offset
[FLOW_DISSECTOR_KEY_CVLAN
] =
79 offsetof(struct nft_flow_key
, cvlan
);
80 match
->dissector
.used_keys
|= BIT(FLOW_DISSECTOR_KEY_CVLAN
);
82 match
->key
.basic
.n_proto
= match
->key
.vlan
.vlan_tpid
;
83 match
->mask
.basic
.n_proto
= match
->mask
.vlan
.vlan_tpid
;
84 match
->key
.vlan
.vlan_tpid
= ethertype
.value
;
85 match
->mask
.vlan
.vlan_tpid
= ethertype
.mask
;
86 match
->dissector
.offset
[FLOW_DISSECTOR_KEY_VLAN
] =
87 offsetof(struct nft_flow_key
, vlan
);
88 match
->dissector
.used_keys
|= BIT(FLOW_DISSECTOR_KEY_VLAN
);
92 struct nft_flow_rule
*nft_flow_rule_create(struct net
*net
,
93 const struct nft_rule
*rule
)
95 struct nft_offload_ctx
*ctx
;
96 struct nft_flow_rule
*flow
;
97 int num_actions
= 0, err
;
98 struct nft_expr
*expr
;
100 expr
= nft_expr_first(rule
);
101 while (nft_expr_more(rule
, expr
)) {
102 if (expr
->ops
->offload_flags
& NFT_OFFLOAD_F_ACTION
)
105 expr
= nft_expr_next(expr
);
108 if (num_actions
== 0)
109 return ERR_PTR(-EOPNOTSUPP
);
111 flow
= nft_flow_rule_alloc(num_actions
);
113 return ERR_PTR(-ENOMEM
);
115 expr
= nft_expr_first(rule
);
117 ctx
= kzalloc(sizeof(struct nft_offload_ctx
), GFP_KERNEL
);
123 ctx
->dep
.type
= NFT_OFFLOAD_DEP_UNSPEC
;
125 while (nft_expr_more(rule
, expr
)) {
126 if (!expr
->ops
->offload
) {
130 err
= expr
->ops
->offload(ctx
, flow
, expr
);
134 expr
= nft_expr_next(expr
);
136 nft_flow_rule_transfer_vlan(ctx
, flow
);
138 flow
->proto
= ctx
->dep
.l3num
;
144 nft_flow_rule_destroy(flow
);
149 void nft_flow_rule_destroy(struct nft_flow_rule
*flow
)
151 struct flow_action_entry
*entry
;
154 flow_action_for_each(i
, entry
, &flow
->rule
->action
) {
156 case FLOW_ACTION_REDIRECT
:
157 case FLOW_ACTION_MIRRED
:
168 void nft_offload_set_dependency(struct nft_offload_ctx
*ctx
,
169 enum nft_offload_dep_type type
)
171 ctx
->dep
.type
= type
;
174 void nft_offload_update_dependency(struct nft_offload_ctx
*ctx
,
175 const void *data
, u32 len
)
177 switch (ctx
->dep
.type
) {
178 case NFT_OFFLOAD_DEP_NETWORK
:
179 WARN_ON(len
!= sizeof(__u16
));
180 memcpy(&ctx
->dep
.l3num
, data
, sizeof(__u16
));
182 case NFT_OFFLOAD_DEP_TRANSPORT
:
183 WARN_ON(len
!= sizeof(__u8
));
184 memcpy(&ctx
->dep
.protonum
, data
, sizeof(__u8
));
189 ctx
->dep
.type
= NFT_OFFLOAD_DEP_UNSPEC
;
192 static void nft_flow_offload_common_init(struct flow_cls_common_offload
*common
,
193 __be16 proto
, int priority
,
194 struct netlink_ext_ack
*extack
)
196 common
->protocol
= proto
;
197 common
->prio
= priority
;
198 common
->extack
= extack
;
201 static int nft_setup_cb_call(enum tc_setup_type type
, void *type_data
,
202 struct list_head
*cb_list
)
204 struct flow_block_cb
*block_cb
;
207 list_for_each_entry(block_cb
, cb_list
, list
) {
208 err
= block_cb
->cb(type
, type_data
, block_cb
->cb_priv
);
215 int nft_chain_offload_priority(struct nft_base_chain
*basechain
)
217 if (basechain
->ops
.priority
<= 0 ||
218 basechain
->ops
.priority
> USHRT_MAX
)
224 static void nft_flow_cls_offload_setup(struct flow_cls_offload
*cls_flow
,
225 const struct nft_base_chain
*basechain
,
226 const struct nft_rule
*rule
,
227 const struct nft_flow_rule
*flow
,
228 struct netlink_ext_ack
*extack
,
229 enum flow_cls_command command
)
231 __be16 proto
= ETH_P_ALL
;
233 memset(cls_flow
, 0, sizeof(*cls_flow
));
238 nft_flow_offload_common_init(&cls_flow
->common
, proto
,
239 basechain
->ops
.priority
, extack
);
240 cls_flow
->command
= command
;
241 cls_flow
->cookie
= (unsigned long) rule
;
243 cls_flow
->rule
= flow
->rule
;
246 static int nft_flow_offload_cmd(const struct nft_chain
*chain
,
247 const struct nft_rule
*rule
,
248 struct nft_flow_rule
*flow
,
249 enum flow_cls_command command
,
250 struct flow_cls_offload
*cls_flow
)
252 struct netlink_ext_ack extack
= {};
253 struct nft_base_chain
*basechain
;
255 if (!nft_is_base_chain(chain
))
258 basechain
= nft_base_chain(chain
);
259 nft_flow_cls_offload_setup(cls_flow
, basechain
, rule
, flow
, &extack
,
262 return nft_setup_cb_call(TC_SETUP_CLSFLOWER
, cls_flow
,
263 &basechain
->flow_block
.cb_list
);
266 static int nft_flow_offload_rule(const struct nft_chain
*chain
,
267 struct nft_rule
*rule
,
268 struct nft_flow_rule
*flow
,
269 enum flow_cls_command command
)
271 struct flow_cls_offload cls_flow
;
273 return nft_flow_offload_cmd(chain
, rule
, flow
, command
, &cls_flow
);
276 int nft_flow_rule_stats(const struct nft_chain
*chain
,
277 const struct nft_rule
*rule
)
279 struct flow_cls_offload cls_flow
= {};
280 struct nft_expr
*expr
, *next
;
283 err
= nft_flow_offload_cmd(chain
, rule
, NULL
, FLOW_CLS_STATS
,
288 nft_rule_for_each_expr(expr
, next
, rule
) {
289 if (expr
->ops
->offload_stats
)
290 expr
->ops
->offload_stats(expr
, &cls_flow
.stats
);
296 static int nft_flow_offload_bind(struct flow_block_offload
*bo
,
297 struct nft_base_chain
*basechain
)
299 list_splice(&bo
->cb_list
, &basechain
->flow_block
.cb_list
);
303 static int nft_flow_offload_unbind(struct flow_block_offload
*bo
,
304 struct nft_base_chain
*basechain
)
306 struct flow_block_cb
*block_cb
, *next
;
307 struct flow_cls_offload cls_flow
;
308 struct netlink_ext_ack extack
;
309 struct nft_chain
*chain
;
310 struct nft_rule
*rule
;
312 chain
= &basechain
->chain
;
313 list_for_each_entry(rule
, &chain
->rules
, list
) {
314 memset(&extack
, 0, sizeof(extack
));
315 nft_flow_cls_offload_setup(&cls_flow
, basechain
, rule
, NULL
,
316 &extack
, FLOW_CLS_DESTROY
);
317 nft_setup_cb_call(TC_SETUP_CLSFLOWER
, &cls_flow
, &bo
->cb_list
);
320 list_for_each_entry_safe(block_cb
, next
, &bo
->cb_list
, list
) {
321 list_del(&block_cb
->list
);
322 flow_block_cb_free(block_cb
);
328 static int nft_block_setup(struct nft_base_chain
*basechain
,
329 struct flow_block_offload
*bo
,
330 enum flow_block_command cmd
)
335 case FLOW_BLOCK_BIND
:
336 err
= nft_flow_offload_bind(bo
, basechain
);
338 case FLOW_BLOCK_UNBIND
:
339 err
= nft_flow_offload_unbind(bo
, basechain
);
349 static void nft_flow_block_offload_init(struct flow_block_offload
*bo
,
351 enum flow_block_command cmd
,
352 struct nft_base_chain
*basechain
,
353 struct netlink_ext_ack
*extack
)
355 memset(bo
, 0, sizeof(*bo
));
357 bo
->block
= &basechain
->flow_block
;
359 bo
->binder_type
= FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS
;
361 INIT_LIST_HEAD(&bo
->cb_list
);
364 static int nft_block_offload_cmd(struct nft_base_chain
*chain
,
365 struct net_device
*dev
,
366 enum flow_block_command cmd
)
368 struct netlink_ext_ack extack
= {};
369 struct flow_block_offload bo
;
372 nft_flow_block_offload_init(&bo
, dev_net(dev
), cmd
, chain
, &extack
);
374 err
= dev
->netdev_ops
->ndo_setup_tc(dev
, TC_SETUP_BLOCK
, &bo
);
378 return nft_block_setup(chain
, &bo
, cmd
);
381 static void nft_indr_block_cleanup(struct flow_block_cb
*block_cb
)
383 struct nft_base_chain
*basechain
= block_cb
->indr
.data
;
384 struct net_device
*dev
= block_cb
->indr
.dev
;
385 struct netlink_ext_ack extack
= {};
386 struct nftables_pernet
*nft_net
;
387 struct net
*net
= dev_net(dev
);
388 struct flow_block_offload bo
;
390 nft_flow_block_offload_init(&bo
, dev_net(dev
), FLOW_BLOCK_UNBIND
,
392 nft_net
= net_generic(net
, nf_tables_net_id
);
393 mutex_lock(&nft_net
->commit_mutex
);
394 list_del(&block_cb
->driver_list
);
395 list_move(&block_cb
->list
, &bo
.cb_list
);
396 nft_flow_offload_unbind(&bo
, basechain
);
397 mutex_unlock(&nft_net
->commit_mutex
);
400 static int nft_indr_block_offload_cmd(struct nft_base_chain
*basechain
,
401 struct net_device
*dev
,
402 enum flow_block_command cmd
)
404 struct netlink_ext_ack extack
= {};
405 struct flow_block_offload bo
;
408 nft_flow_block_offload_init(&bo
, dev_net(dev
), cmd
, basechain
, &extack
);
410 err
= flow_indr_dev_setup_offload(dev
, NULL
, TC_SETUP_BLOCK
, basechain
, &bo
,
411 nft_indr_block_cleanup
);
415 if (list_empty(&bo
.cb_list
))
418 return nft_block_setup(basechain
, &bo
, cmd
);
421 static int nft_chain_offload_cmd(struct nft_base_chain
*basechain
,
422 struct net_device
*dev
,
423 enum flow_block_command cmd
)
427 if (dev
->netdev_ops
->ndo_setup_tc
)
428 err
= nft_block_offload_cmd(basechain
, dev
, cmd
);
430 err
= nft_indr_block_offload_cmd(basechain
, dev
, cmd
);
435 static int nft_flow_block_chain(struct nft_base_chain
*basechain
,
436 const struct net_device
*this_dev
,
437 enum flow_block_command cmd
)
439 struct net_device
*dev
;
440 struct nft_hook
*hook
;
443 list_for_each_entry(hook
, &basechain
->hook_list
, list
) {
445 if (this_dev
&& this_dev
!= dev
)
448 err
= nft_chain_offload_cmd(basechain
, dev
, cmd
);
449 if (err
< 0 && cmd
== FLOW_BLOCK_BIND
) {
461 list_for_each_entry(hook
, &basechain
->hook_list
, list
) {
466 nft_chain_offload_cmd(basechain
, dev
, FLOW_BLOCK_UNBIND
);
471 static int nft_flow_offload_chain(struct nft_chain
*chain
, u8
*ppolicy
,
472 enum flow_block_command cmd
)
474 struct nft_base_chain
*basechain
;
477 if (!nft_is_base_chain(chain
))
480 basechain
= nft_base_chain(chain
);
481 policy
= ppolicy
? *ppolicy
: basechain
->policy
;
483 /* Only default policy to accept is supported for now. */
484 if (cmd
== FLOW_BLOCK_BIND
&& policy
== NF_DROP
)
487 return nft_flow_block_chain(basechain
, NULL
, cmd
);
490 static void nft_flow_rule_offload_abort(struct net
*net
,
491 struct nft_trans
*trans
)
493 struct nftables_pernet
*nft_net
= net_generic(net
, nf_tables_net_id
);
496 list_for_each_entry_continue_reverse(trans
, &nft_net
->commit_list
, list
) {
497 if (trans
->ctx
.family
!= NFPROTO_NETDEV
)
500 switch (trans
->msg_type
) {
501 case NFT_MSG_NEWCHAIN
:
502 if (!(trans
->ctx
.chain
->flags
& NFT_CHAIN_HW_OFFLOAD
) ||
503 nft_trans_chain_update(trans
))
506 err
= nft_flow_offload_chain(trans
->ctx
.chain
, NULL
,
509 case NFT_MSG_DELCHAIN
:
510 if (!(trans
->ctx
.chain
->flags
& NFT_CHAIN_HW_OFFLOAD
))
513 err
= nft_flow_offload_chain(trans
->ctx
.chain
, NULL
,
516 case NFT_MSG_NEWRULE
:
517 if (!(trans
->ctx
.chain
->flags
& NFT_CHAIN_HW_OFFLOAD
))
520 err
= nft_flow_offload_rule(trans
->ctx
.chain
,
521 nft_trans_rule(trans
),
522 NULL
, FLOW_CLS_DESTROY
);
524 case NFT_MSG_DELRULE
:
525 if (!(trans
->ctx
.chain
->flags
& NFT_CHAIN_HW_OFFLOAD
))
528 err
= nft_flow_offload_rule(trans
->ctx
.chain
,
529 nft_trans_rule(trans
),
530 nft_trans_flow_rule(trans
),
535 if (WARN_ON_ONCE(err
))
540 int nft_flow_rule_offload_commit(struct net
*net
)
542 struct nftables_pernet
*nft_net
= net_generic(net
, nf_tables_net_id
);
543 struct nft_trans
*trans
;
547 list_for_each_entry(trans
, &nft_net
->commit_list
, list
) {
548 if (trans
->ctx
.family
!= NFPROTO_NETDEV
)
551 switch (trans
->msg_type
) {
552 case NFT_MSG_NEWCHAIN
:
553 if (!(trans
->ctx
.chain
->flags
& NFT_CHAIN_HW_OFFLOAD
) ||
554 nft_trans_chain_update(trans
))
557 policy
= nft_trans_chain_policy(trans
);
558 err
= nft_flow_offload_chain(trans
->ctx
.chain
, &policy
,
561 case NFT_MSG_DELCHAIN
:
562 if (!(trans
->ctx
.chain
->flags
& NFT_CHAIN_HW_OFFLOAD
))
565 policy
= nft_trans_chain_policy(trans
);
566 err
= nft_flow_offload_chain(trans
->ctx
.chain
, &policy
,
569 case NFT_MSG_NEWRULE
:
570 if (!(trans
->ctx
.chain
->flags
& NFT_CHAIN_HW_OFFLOAD
))
573 if (trans
->ctx
.flags
& NLM_F_REPLACE
||
574 !(trans
->ctx
.flags
& NLM_F_APPEND
)) {
578 err
= nft_flow_offload_rule(trans
->ctx
.chain
,
579 nft_trans_rule(trans
),
580 nft_trans_flow_rule(trans
),
583 case NFT_MSG_DELRULE
:
584 if (!(trans
->ctx
.chain
->flags
& NFT_CHAIN_HW_OFFLOAD
))
587 err
= nft_flow_offload_rule(trans
->ctx
.chain
,
588 nft_trans_rule(trans
),
589 NULL
, FLOW_CLS_DESTROY
);
594 nft_flow_rule_offload_abort(net
, trans
);
599 list_for_each_entry(trans
, &nft_net
->commit_list
, list
) {
600 if (trans
->ctx
.family
!= NFPROTO_NETDEV
)
603 switch (trans
->msg_type
) {
604 case NFT_MSG_NEWRULE
:
605 case NFT_MSG_DELRULE
:
606 if (!(trans
->ctx
.chain
->flags
& NFT_CHAIN_HW_OFFLOAD
))
609 nft_flow_rule_destroy(nft_trans_flow_rule(trans
));
619 static struct nft_chain
*__nft_offload_get_chain(const struct nftables_pernet
*nft_net
,
620 struct net_device
*dev
)
622 struct nft_base_chain
*basechain
;
623 struct nft_hook
*hook
, *found
;
624 const struct nft_table
*table
;
625 struct nft_chain
*chain
;
627 list_for_each_entry(table
, &nft_net
->tables
, list
) {
628 if (table
->family
!= NFPROTO_NETDEV
)
631 list_for_each_entry(chain
, &table
->chains
, list
) {
632 if (!nft_is_base_chain(chain
) ||
633 !(chain
->flags
& NFT_CHAIN_HW_OFFLOAD
))
637 basechain
= nft_base_chain(chain
);
638 list_for_each_entry(hook
, &basechain
->hook_list
, list
) {
639 if (hook
->ops
.dev
!= dev
)
655 static int nft_offload_netdev_event(struct notifier_block
*this,
656 unsigned long event
, void *ptr
)
658 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
659 struct nftables_pernet
*nft_net
;
660 struct net
*net
= dev_net(dev
);
661 struct nft_chain
*chain
;
663 if (event
!= NETDEV_UNREGISTER
)
666 nft_net
= net_generic(net
, nf_tables_net_id
);
667 mutex_lock(&nft_net
->commit_mutex
);
668 chain
= __nft_offload_get_chain(nft_net
, dev
);
670 nft_flow_block_chain(nft_base_chain(chain
), dev
,
673 mutex_unlock(&nft_net
->commit_mutex
);
678 static struct notifier_block nft_offload_netdev_notifier
= {
679 .notifier_call
= nft_offload_netdev_event
,
682 int nft_offload_init(void)
684 return register_netdevice_notifier(&nft_offload_netdev_notifier
);
687 void nft_offload_exit(void)
689 unregister_netdevice_notifier(&nft_offload_netdev_notifier
);