/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"
/* TCP flag bits the firmware can match on. */
#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)
/* Flow-dissector control flags the firmware can match on. */
#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)
/* Complete set of dissector keys the driver can offload; any other
 * used key causes the flow to be rejected.
 */
#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))
/* All tunnel-related dissector keys the driver understands. */
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
/* Required (_R) subset: if any tunnel key is used, all of these must be. */
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
85 nfp_flower_xmit_flow(struct net_device
*netdev
,
86 struct nfp_fl_payload
*nfp_flow
, u8 mtype
)
88 u32 meta_len
, key_len
, mask_len
, act_len
, tot_len
;
89 struct nfp_repr
*priv
= netdev_priv(netdev
);
93 meta_len
= sizeof(struct nfp_fl_rule_metadata
);
94 key_len
= nfp_flow
->meta
.key_len
;
95 mask_len
= nfp_flow
->meta
.mask_len
;
96 act_len
= nfp_flow
->meta
.act_len
;
98 tot_len
= meta_len
+ key_len
+ mask_len
+ act_len
;
100 /* Convert to long words as firmware expects
101 * lengths in units of NFP_FL_LW_SIZ.
103 nfp_flow
->meta
.key_len
>>= NFP_FL_LW_SIZ
;
104 nfp_flow
->meta
.mask_len
>>= NFP_FL_LW_SIZ
;
105 nfp_flow
->meta
.act_len
>>= NFP_FL_LW_SIZ
;
107 skb
= nfp_flower_cmsg_alloc(priv
->app
, tot_len
, mtype
, GFP_KERNEL
);
111 msg
= nfp_flower_cmsg_get_data(skb
);
112 memcpy(msg
, &nfp_flow
->meta
, meta_len
);
113 memcpy(&msg
[meta_len
], nfp_flow
->unmasked_data
, key_len
);
114 memcpy(&msg
[meta_len
+ key_len
], nfp_flow
->mask_data
, mask_len
);
115 memcpy(&msg
[meta_len
+ key_len
+ mask_len
],
116 nfp_flow
->action_data
, act_len
);
118 /* Convert back to bytes as software expects
119 * lengths in units of bytes.
121 nfp_flow
->meta
.key_len
<<= NFP_FL_LW_SIZ
;
122 nfp_flow
->meta
.mask_len
<<= NFP_FL_LW_SIZ
;
123 nfp_flow
->meta
.act_len
<<= NFP_FL_LW_SIZ
;
125 nfp_ctrl_tx(priv
->app
->ctrl
, skb
);
130 static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload
*f
)
132 return dissector_uses_key(f
->dissector
,
133 FLOW_DISSECTOR_KEY_IPV4_ADDRS
) ||
134 dissector_uses_key(f
->dissector
,
135 FLOW_DISSECTOR_KEY_IPV6_ADDRS
) ||
136 dissector_uses_key(f
->dissector
,
137 FLOW_DISSECTOR_KEY_PORTS
) ||
138 dissector_uses_key(f
->dissector
, FLOW_DISSECTOR_KEY_ICMP
);
142 nfp_flower_calculate_key_layers(struct nfp_app
*app
,
143 struct nfp_fl_key_ls
*ret_key_ls
,
144 struct tc_cls_flower_offload
*flow
,
146 enum nfp_flower_tun_type
*tun_type
)
148 struct flow_dissector_key_basic
*mask_basic
= NULL
;
149 struct flow_dissector_key_basic
*key_basic
= NULL
;
150 struct nfp_flower_priv
*priv
= app
->priv
;
155 if (flow
->dissector
->used_keys
& ~NFP_FLOWER_WHITELIST_DISSECTOR
)
158 /* If any tun dissector is used then the required set must be used. */
159 if (flow
->dissector
->used_keys
& NFP_FLOWER_WHITELIST_TUN_DISSECTOR
&&
160 (flow
->dissector
->used_keys
& NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R
)
161 != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R
)
165 key_layer
= NFP_FLOWER_LAYER_PORT
;
166 key_size
= sizeof(struct nfp_flower_meta_tci
) +
167 sizeof(struct nfp_flower_in_port
);
169 if (dissector_uses_key(flow
->dissector
, FLOW_DISSECTOR_KEY_ETH_ADDRS
) ||
170 dissector_uses_key(flow
->dissector
, FLOW_DISSECTOR_KEY_MPLS
)) {
171 key_layer
|= NFP_FLOWER_LAYER_MAC
;
172 key_size
+= sizeof(struct nfp_flower_mac_mpls
);
175 if (dissector_uses_key(flow
->dissector
,
176 FLOW_DISSECTOR_KEY_ENC_CONTROL
)) {
177 struct flow_dissector_key_ipv4_addrs
*mask_ipv4
= NULL
;
178 struct flow_dissector_key_ports
*mask_enc_ports
= NULL
;
179 struct flow_dissector_key_ports
*enc_ports
= NULL
;
180 struct flow_dissector_key_control
*mask_enc_ctl
=
181 skb_flow_dissector_target(flow
->dissector
,
182 FLOW_DISSECTOR_KEY_ENC_CONTROL
,
184 struct flow_dissector_key_control
*enc_ctl
=
185 skb_flow_dissector_target(flow
->dissector
,
186 FLOW_DISSECTOR_KEY_ENC_CONTROL
,
191 if (mask_enc_ctl
->addr_type
!= 0xffff ||
192 enc_ctl
->addr_type
!= FLOW_DISSECTOR_KEY_IPV4_ADDRS
)
195 /* These fields are already verified as used. */
197 skb_flow_dissector_target(flow
->dissector
,
198 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS
,
200 if (mask_ipv4
->dst
!= cpu_to_be32(~0))
204 skb_flow_dissector_target(flow
->dissector
,
205 FLOW_DISSECTOR_KEY_ENC_PORTS
,
208 skb_flow_dissector_target(flow
->dissector
,
209 FLOW_DISSECTOR_KEY_ENC_PORTS
,
212 if (mask_enc_ports
->dst
!= cpu_to_be16(~0))
215 switch (enc_ports
->dst
) {
216 case htons(NFP_FL_VXLAN_PORT
):
217 *tun_type
= NFP_FL_TUNNEL_VXLAN
;
218 key_layer
|= NFP_FLOWER_LAYER_VXLAN
;
219 key_size
+= sizeof(struct nfp_flower_ipv4_udp_tun
);
221 case htons(NFP_FL_GENEVE_PORT
):
222 if (!(priv
->flower_ext_feats
& NFP_FL_FEATS_GENEVE
))
224 *tun_type
= NFP_FL_TUNNEL_GENEVE
;
225 key_layer
|= NFP_FLOWER_LAYER_EXT_META
;
226 key_size
+= sizeof(struct nfp_flower_ext_meta
);
227 key_layer_two
|= NFP_FLOWER_LAYER2_GENEVE
;
228 key_size
+= sizeof(struct nfp_flower_ipv4_udp_tun
);
234 /* Reject non tunnel matches offloaded to egress repr. */
238 if (dissector_uses_key(flow
->dissector
, FLOW_DISSECTOR_KEY_BASIC
)) {
239 mask_basic
= skb_flow_dissector_target(flow
->dissector
,
240 FLOW_DISSECTOR_KEY_BASIC
,
243 key_basic
= skb_flow_dissector_target(flow
->dissector
,
244 FLOW_DISSECTOR_KEY_BASIC
,
248 if (mask_basic
&& mask_basic
->n_proto
) {
249 /* Ethernet type is present in the key. */
250 switch (key_basic
->n_proto
) {
251 case cpu_to_be16(ETH_P_IP
):
252 key_layer
|= NFP_FLOWER_LAYER_IPV4
;
253 key_size
+= sizeof(struct nfp_flower_ipv4
);
256 case cpu_to_be16(ETH_P_IPV6
):
257 key_layer
|= NFP_FLOWER_LAYER_IPV6
;
258 key_size
+= sizeof(struct nfp_flower_ipv6
);
261 /* Currently we do not offload ARP
262 * because we rely on it to get to the host.
264 case cpu_to_be16(ETH_P_ARP
):
267 /* Will be included in layer 2. */
268 case cpu_to_be16(ETH_P_8021Q
):
272 /* Other ethtype - we need check the masks for the
273 * remainder of the key to ensure we can offload.
275 if (nfp_flower_check_higher_than_mac(flow
))
281 if (mask_basic
&& mask_basic
->ip_proto
) {
282 /* Ethernet type is present in the key. */
283 switch (key_basic
->ip_proto
) {
289 key_layer
|= NFP_FLOWER_LAYER_TP
;
290 key_size
+= sizeof(struct nfp_flower_tp_ports
);
293 /* Other ip proto - we need check the masks for the
294 * remainder of the key to ensure we can offload.
300 if (dissector_uses_key(flow
->dissector
, FLOW_DISSECTOR_KEY_TCP
)) {
301 struct flow_dissector_key_tcp
*tcp
;
304 tcp
= skb_flow_dissector_target(flow
->dissector
,
305 FLOW_DISSECTOR_KEY_TCP
,
307 tcp_flags
= be16_to_cpu(tcp
->flags
);
309 if (tcp_flags
& ~NFP_FLOWER_SUPPORTED_TCPFLAGS
)
312 /* We only support PSH and URG flags when either
313 * FIN, SYN or RST is present as well.
315 if ((tcp_flags
& (TCPHDR_PSH
| TCPHDR_URG
)) &&
316 !(tcp_flags
& (TCPHDR_FIN
| TCPHDR_SYN
| TCPHDR_RST
)))
319 /* We need to store TCP flags in the IPv4 key space, thus
320 * we need to ensure we include a IPv4 key layer if we have
321 * not done so already.
323 if (!(key_layer
& NFP_FLOWER_LAYER_IPV4
)) {
324 key_layer
|= NFP_FLOWER_LAYER_IPV4
;
325 key_size
+= sizeof(struct nfp_flower_ipv4
);
329 if (dissector_uses_key(flow
->dissector
, FLOW_DISSECTOR_KEY_CONTROL
)) {
330 struct flow_dissector_key_control
*key_ctl
;
332 key_ctl
= skb_flow_dissector_target(flow
->dissector
,
333 FLOW_DISSECTOR_KEY_CONTROL
,
336 if (key_ctl
->flags
& ~NFP_FLOWER_SUPPORTED_CTLFLAGS
)
340 ret_key_ls
->key_layer
= key_layer
;
341 ret_key_ls
->key_layer_two
= key_layer_two
;
342 ret_key_ls
->key_size
= key_size
;
347 static struct nfp_fl_payload
*
348 nfp_flower_allocate_new(struct nfp_fl_key_ls
*key_layer
)
350 struct nfp_fl_payload
*flow_pay
;
352 flow_pay
= kmalloc(sizeof(*flow_pay
), GFP_KERNEL
);
356 flow_pay
->meta
.key_len
= key_layer
->key_size
;
357 flow_pay
->unmasked_data
= kmalloc(key_layer
->key_size
, GFP_KERNEL
);
358 if (!flow_pay
->unmasked_data
)
361 flow_pay
->meta
.mask_len
= key_layer
->key_size
;
362 flow_pay
->mask_data
= kmalloc(key_layer
->key_size
, GFP_KERNEL
);
363 if (!flow_pay
->mask_data
)
364 goto err_free_unmasked
;
366 flow_pay
->action_data
= kmalloc(NFP_FL_MAX_A_SIZ
, GFP_KERNEL
);
367 if (!flow_pay
->action_data
)
370 flow_pay
->nfp_tun_ipv4_addr
= 0;
371 flow_pay
->meta
.flags
= 0;
372 spin_lock_init(&flow_pay
->lock
);
377 kfree(flow_pay
->mask_data
);
379 kfree(flow_pay
->unmasked_data
);
386 * nfp_flower_add_offload() - Adds a new flow to hardware.
387 * @app: Pointer to the APP handle
388 * @netdev: netdev structure.
389 * @flow: TC flower classifier offload structure.
390 * @egress: NFP netdev is the egress.
392 * Adds a new flow to the repeated hash structure and action payload.
394 * Return: negative value on error, 0 if configured successfully.
397 nfp_flower_add_offload(struct nfp_app
*app
, struct net_device
*netdev
,
398 struct tc_cls_flower_offload
*flow
, bool egress
)
400 enum nfp_flower_tun_type tun_type
= NFP_FL_TUNNEL_NONE
;
401 struct nfp_port
*port
= nfp_port_from_netdev(netdev
);
402 struct nfp_flower_priv
*priv
= app
->priv
;
403 struct nfp_fl_payload
*flow_pay
;
404 struct nfp_fl_key_ls
*key_layer
;
407 key_layer
= kmalloc(sizeof(*key_layer
), GFP_KERNEL
);
411 err
= nfp_flower_calculate_key_layers(app
, key_layer
, flow
, egress
,
414 goto err_free_key_ls
;
416 flow_pay
= nfp_flower_allocate_new(key_layer
);
419 goto err_free_key_ls
;
422 err
= nfp_flower_compile_flow_match(flow
, key_layer
, netdev
, flow_pay
,
425 goto err_destroy_flow
;
427 err
= nfp_flower_compile_action(flow
, netdev
, flow_pay
);
429 goto err_destroy_flow
;
431 err
= nfp_compile_flow_metadata(app
, flow
, flow_pay
);
433 goto err_destroy_flow
;
435 err
= nfp_flower_xmit_flow(netdev
, flow_pay
,
436 NFP_FLOWER_CMSG_TYPE_FLOW_ADD
);
438 goto err_destroy_flow
;
440 INIT_HLIST_NODE(&flow_pay
->link
);
441 flow_pay
->tc_flower_cookie
= flow
->cookie
;
442 hash_add_rcu(priv
->flow_table
, &flow_pay
->link
, flow
->cookie
);
443 port
->tc_offload_cnt
++;
445 /* Deallocate flow payload when flower rule has been destroyed. */
451 kfree(flow_pay
->action_data
);
452 kfree(flow_pay
->mask_data
);
453 kfree(flow_pay
->unmasked_data
);
461 * nfp_flower_del_offload() - Removes a flow from hardware.
462 * @app: Pointer to the APP handle
463 * @netdev: netdev structure.
464 * @flow: TC flower classifier offload structure
466 * Removes a flow from the repeated hash structure and clears the
469 * Return: negative value on error, 0 if removed successfully.
472 nfp_flower_del_offload(struct nfp_app
*app
, struct net_device
*netdev
,
473 struct tc_cls_flower_offload
*flow
)
475 struct nfp_port
*port
= nfp_port_from_netdev(netdev
);
476 struct nfp_fl_payload
*nfp_flow
;
479 nfp_flow
= nfp_flower_search_fl_table(app
, flow
->cookie
);
483 err
= nfp_modify_flow_metadata(app
, nfp_flow
);
487 if (nfp_flow
->nfp_tun_ipv4_addr
)
488 nfp_tunnel_del_ipv4_off(app
, nfp_flow
->nfp_tun_ipv4_addr
);
490 err
= nfp_flower_xmit_flow(netdev
, nfp_flow
,
491 NFP_FLOWER_CMSG_TYPE_FLOW_DEL
);
496 hash_del_rcu(&nfp_flow
->link
);
497 port
->tc_offload_cnt
--;
498 kfree(nfp_flow
->action_data
);
499 kfree(nfp_flow
->mask_data
);
500 kfree(nfp_flow
->unmasked_data
);
501 kfree_rcu(nfp_flow
, rcu
);
506 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
507 * @app: Pointer to the APP handle
508 * @flow: TC flower classifier offload structure
510 * Populates a flow statistics structure which which corresponds to a
513 * Return: negative value on error, 0 if stats populated successfully.
516 nfp_flower_get_stats(struct nfp_app
*app
, struct tc_cls_flower_offload
*flow
)
518 struct nfp_fl_payload
*nfp_flow
;
520 nfp_flow
= nfp_flower_search_fl_table(app
, flow
->cookie
);
524 spin_lock_bh(&nfp_flow
->lock
);
525 tcf_exts_stats_update(flow
->exts
, nfp_flow
->stats
.bytes
,
526 nfp_flow
->stats
.pkts
, nfp_flow
->stats
.used
);
528 nfp_flow
->stats
.pkts
= 0;
529 nfp_flow
->stats
.bytes
= 0;
530 spin_unlock_bh(&nfp_flow
->lock
);
536 nfp_flower_repr_offload(struct nfp_app
*app
, struct net_device
*netdev
,
537 struct tc_cls_flower_offload
*flower
, bool egress
)
539 if (!eth_proto_is_802_3(flower
->common
.protocol
))
542 switch (flower
->command
) {
543 case TC_CLSFLOWER_REPLACE
:
544 return nfp_flower_add_offload(app
, netdev
, flower
, egress
);
545 case TC_CLSFLOWER_DESTROY
:
546 return nfp_flower_del_offload(app
, netdev
, flower
);
547 case TC_CLSFLOWER_STATS
:
548 return nfp_flower_get_stats(app
, flower
);
554 int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type
, void *type_data
,
557 struct nfp_repr
*repr
= cb_priv
;
559 if (!tc_cls_can_offload_and_chain0(repr
->netdev
, type_data
))
563 case TC_SETUP_CLSFLOWER
:
564 return nfp_flower_repr_offload(repr
->app
, repr
->netdev
,
571 static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type
,
572 void *type_data
, void *cb_priv
)
574 struct nfp_repr
*repr
= cb_priv
;
576 if (!tc_cls_can_offload_and_chain0(repr
->netdev
, type_data
))
580 case TC_SETUP_CLSFLOWER
:
581 return nfp_flower_repr_offload(repr
->app
, repr
->netdev
,
588 static int nfp_flower_setup_tc_block(struct net_device
*netdev
,
589 struct tc_block_offload
*f
)
591 struct nfp_repr
*repr
= netdev_priv(netdev
);
593 if (f
->binder_type
!= TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS
)
596 switch (f
->command
) {
598 return tcf_block_cb_register(f
->block
,
599 nfp_flower_setup_tc_block_cb
,
601 case TC_BLOCK_UNBIND
:
602 tcf_block_cb_unregister(f
->block
,
603 nfp_flower_setup_tc_block_cb
,
611 int nfp_flower_setup_tc(struct nfp_app
*app
, struct net_device
*netdev
,
612 enum tc_setup_type type
, void *type_data
)
616 return nfp_flower_setup_tc_block(netdev
, type_data
);