2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <net/flow_dissector.h>
34 #include <net/sch_generic.h>
35 #include <net/pkt_cls.h>
36 #include <net/tc_act/tc_gact.h>
37 #include <net/tc_act/tc_skbedit.h>
38 #include <linux/mlx5/fs.h>
39 #include <linux/mlx5/device.h>
40 #include <linux/rhashtable.h>
41 #include <net/switchdev.h>
42 #include <net/tc_act/tc_mirred.h>
43 #include <net/tc_act/tc_vlan.h>
44 #include <net/tc_act/tc_tunnel_key.h>
45 #include <net/vxlan.h>
52 MLX5E_TC_FLOW_ESWITCH
= BIT(0),
55 struct mlx5e_tc_flow
{
56 struct rhash_head node
;
59 struct mlx5_flow_handle
*rule
;
60 struct list_head encap
; /* flows sharing the same encap */
61 struct mlx5_esw_flow_attr
*attr
;
65 MLX5_HEADER_TYPE_VXLAN
= 0x0,
66 MLX5_HEADER_TYPE_NVGRE
= 0x1,
69 #define MLX5E_TC_TABLE_NUM_ENTRIES 1024
70 #define MLX5E_TC_TABLE_NUM_GROUPS 4
72 static struct mlx5_flow_handle
*
73 mlx5e_tc_add_nic_flow(struct mlx5e_priv
*priv
,
74 struct mlx5_flow_spec
*spec
,
75 u32 action
, u32 flow_tag
)
77 struct mlx5_core_dev
*dev
= priv
->mdev
;
78 struct mlx5_flow_destination dest
= { 0 };
79 struct mlx5_flow_act flow_act
= {
84 struct mlx5_fc
*counter
= NULL
;
85 struct mlx5_flow_handle
*rule
;
86 bool table_created
= false;
88 if (action
& MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
) {
89 dest
.type
= MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE
;
90 dest
.ft
= priv
->fs
.vlan
.ft
.t
;
91 } else if (action
& MLX5_FLOW_CONTEXT_ACTION_COUNT
) {
92 counter
= mlx5_fc_create(dev
, true);
94 return ERR_CAST(counter
);
96 dest
.type
= MLX5_FLOW_DESTINATION_TYPE_COUNTER
;
97 dest
.counter
= counter
;
100 if (IS_ERR_OR_NULL(priv
->fs
.tc
.t
)) {
102 mlx5_create_auto_grouped_flow_table(priv
->fs
.ns
,
104 MLX5E_TC_TABLE_NUM_ENTRIES
,
105 MLX5E_TC_TABLE_NUM_GROUPS
,
107 if (IS_ERR(priv
->fs
.tc
.t
)) {
108 netdev_err(priv
->netdev
,
109 "Failed to create tc offload table\n");
110 rule
= ERR_CAST(priv
->fs
.tc
.t
);
114 table_created
= true;
117 spec
->match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
118 rule
= mlx5_add_flow_rules(priv
->fs
.tc
.t
, spec
, &flow_act
, &dest
, 1);
127 mlx5_destroy_flow_table(priv
->fs
.tc
.t
);
128 priv
->fs
.tc
.t
= NULL
;
131 mlx5_fc_destroy(dev
, counter
);
136 static void mlx5e_tc_del_nic_flow(struct mlx5e_priv
*priv
,
137 struct mlx5e_tc_flow
*flow
)
139 struct mlx5_fc
*counter
= NULL
;
141 if (!IS_ERR(flow
->rule
)) {
142 counter
= mlx5_flow_rule_counter(flow
->rule
);
143 mlx5_del_flow_rules(flow
->rule
);
144 mlx5_fc_destroy(priv
->mdev
, counter
);
147 if (!mlx5e_tc_num_filters(priv
) && (priv
->fs
.tc
.t
)) {
148 mlx5_destroy_flow_table(priv
->fs
.tc
.t
);
149 priv
->fs
.tc
.t
= NULL
;
153 static struct mlx5_flow_handle
*
154 mlx5e_tc_add_fdb_flow(struct mlx5e_priv
*priv
,
155 struct mlx5_flow_spec
*spec
,
156 struct mlx5_esw_flow_attr
*attr
)
158 struct mlx5_eswitch
*esw
= priv
->mdev
->priv
.eswitch
;
161 err
= mlx5_eswitch_add_vlan_action(esw
, attr
);
165 return mlx5_eswitch_add_offloaded_rule(esw
, spec
, attr
);
168 static void mlx5e_detach_encap(struct mlx5e_priv
*priv
,
169 struct mlx5e_tc_flow
*flow
);
171 static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv
*priv
,
172 struct mlx5e_tc_flow
*flow
)
174 struct mlx5_eswitch
*esw
= priv
->mdev
->priv
.eswitch
;
176 mlx5_eswitch_del_offloaded_rule(esw
, flow
->rule
, flow
->attr
);
178 mlx5_eswitch_del_vlan_action(esw
, flow
->attr
);
180 if (flow
->attr
->action
& MLX5_FLOW_CONTEXT_ACTION_ENCAP
)
181 mlx5e_detach_encap(priv
, flow
);
184 static void mlx5e_detach_encap(struct mlx5e_priv
*priv
,
185 struct mlx5e_tc_flow
*flow
)
187 struct list_head
*next
= flow
->encap
.next
;
189 list_del(&flow
->encap
);
190 if (list_empty(next
)) {
191 struct mlx5_encap_entry
*e
;
193 e
= list_entry(next
, struct mlx5_encap_entry
, flows
);
195 mlx5_encap_dealloc(priv
->mdev
, e
->encap_id
);
198 hlist_del_rcu(&e
->encap_hlist
);
203 /* we get here also when setting rule to the FW failed, etc. It means that the
204 * flow rule itself might not exist, but some offloading related to the actions
207 static void mlx5e_tc_del_flow(struct mlx5e_priv
*priv
,
208 struct mlx5e_tc_flow
*flow
)
210 if (flow
->flags
& MLX5E_TC_FLOW_ESWITCH
)
211 mlx5e_tc_del_fdb_flow(priv
, flow
);
213 mlx5e_tc_del_nic_flow(priv
, flow
);
216 static void parse_vxlan_attr(struct mlx5_flow_spec
*spec
,
217 struct tc_cls_flower_offload
*f
)
219 void *headers_c
= MLX5_ADDR_OF(fte_match_param
, spec
->match_criteria
,
221 void *headers_v
= MLX5_ADDR_OF(fte_match_param
, spec
->match_value
,
223 void *misc_c
= MLX5_ADDR_OF(fte_match_param
, spec
->match_criteria
,
225 void *misc_v
= MLX5_ADDR_OF(fte_match_param
, spec
->match_value
,
228 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4
, headers_c
, ip_protocol
);
229 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, ip_protocol
, IPPROTO_UDP
);
231 if (dissector_uses_key(f
->dissector
, FLOW_DISSECTOR_KEY_ENC_KEYID
)) {
232 struct flow_dissector_key_keyid
*key
=
233 skb_flow_dissector_target(f
->dissector
,
234 FLOW_DISSECTOR_KEY_ENC_KEYID
,
236 struct flow_dissector_key_keyid
*mask
=
237 skb_flow_dissector_target(f
->dissector
,
238 FLOW_DISSECTOR_KEY_ENC_KEYID
,
240 MLX5_SET(fte_match_set_misc
, misc_c
, vxlan_vni
,
241 be32_to_cpu(mask
->keyid
));
242 MLX5_SET(fte_match_set_misc
, misc_v
, vxlan_vni
,
243 be32_to_cpu(key
->keyid
));
247 static int parse_tunnel_attr(struct mlx5e_priv
*priv
,
248 struct mlx5_flow_spec
*spec
,
249 struct tc_cls_flower_offload
*f
)
251 void *headers_c
= MLX5_ADDR_OF(fte_match_param
, spec
->match_criteria
,
253 void *headers_v
= MLX5_ADDR_OF(fte_match_param
, spec
->match_value
,
256 struct flow_dissector_key_control
*enc_control
=
257 skb_flow_dissector_target(f
->dissector
,
258 FLOW_DISSECTOR_KEY_ENC_CONTROL
,
261 if (dissector_uses_key(f
->dissector
, FLOW_DISSECTOR_KEY_ENC_PORTS
)) {
262 struct flow_dissector_key_ports
*key
=
263 skb_flow_dissector_target(f
->dissector
,
264 FLOW_DISSECTOR_KEY_ENC_PORTS
,
266 struct flow_dissector_key_ports
*mask
=
267 skb_flow_dissector_target(f
->dissector
,
268 FLOW_DISSECTOR_KEY_ENC_PORTS
,
270 struct mlx5_eswitch
*esw
= priv
->mdev
->priv
.eswitch
;
271 struct net_device
*up_dev
= mlx5_eswitch_get_uplink_netdev(esw
);
272 struct mlx5e_priv
*up_priv
= netdev_priv(up_dev
);
274 /* Full udp dst port must be given */
275 if (memchr_inv(&mask
->dst
, 0xff, sizeof(mask
->dst
)))
276 goto vxlan_match_offload_err
;
278 if (mlx5e_vxlan_lookup_port(up_priv
, be16_to_cpu(key
->dst
)) &&
279 MLX5_CAP_ESW(priv
->mdev
, vxlan_encap_decap
))
280 parse_vxlan_attr(spec
, f
);
282 netdev_warn(priv
->netdev
,
283 "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key
->dst
));
287 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
,
288 udp_dport
, ntohs(mask
->dst
));
289 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
,
290 udp_dport
, ntohs(key
->dst
));
292 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
,
293 udp_sport
, ntohs(mask
->src
));
294 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
,
295 udp_sport
, ntohs(key
->src
));
296 } else { /* udp dst port must be given */
297 vxlan_match_offload_err
:
298 netdev_warn(priv
->netdev
,
299 "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
303 if (enc_control
->addr_type
== FLOW_DISSECTOR_KEY_IPV4_ADDRS
) {
304 struct flow_dissector_key_ipv4_addrs
*key
=
305 skb_flow_dissector_target(f
->dissector
,
306 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS
,
308 struct flow_dissector_key_ipv4_addrs
*mask
=
309 skb_flow_dissector_target(f
->dissector
,
310 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS
,
312 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
,
313 src_ipv4_src_ipv6
.ipv4_layout
.ipv4
,
315 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
,
316 src_ipv4_src_ipv6
.ipv4_layout
.ipv4
,
319 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
,
320 dst_ipv4_dst_ipv6
.ipv4_layout
.ipv4
,
322 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
,
323 dst_ipv4_dst_ipv6
.ipv4_layout
.ipv4
,
326 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4
, headers_c
, ethertype
);
327 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, ethertype
, ETH_P_IP
);
328 } else if (enc_control
->addr_type
== FLOW_DISSECTOR_KEY_IPV6_ADDRS
) {
329 struct flow_dissector_key_ipv6_addrs
*key
=
330 skb_flow_dissector_target(f
->dissector
,
331 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS
,
333 struct flow_dissector_key_ipv6_addrs
*mask
=
334 skb_flow_dissector_target(f
->dissector
,
335 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS
,
338 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_c
,
339 src_ipv4_src_ipv6
.ipv6_layout
.ipv6
),
340 &mask
->src
, MLX5_FLD_SZ_BYTES(ipv6_layout
, ipv6
));
341 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_v
,
342 src_ipv4_src_ipv6
.ipv6_layout
.ipv6
),
343 &key
->src
, MLX5_FLD_SZ_BYTES(ipv6_layout
, ipv6
));
345 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_c
,
346 dst_ipv4_dst_ipv6
.ipv6_layout
.ipv6
),
347 &mask
->dst
, MLX5_FLD_SZ_BYTES(ipv6_layout
, ipv6
));
348 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_v
,
349 dst_ipv4_dst_ipv6
.ipv6_layout
.ipv6
),
350 &key
->dst
, MLX5_FLD_SZ_BYTES(ipv6_layout
, ipv6
));
352 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4
, headers_c
, ethertype
);
353 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, ethertype
, ETH_P_IPV6
);
356 /* Enforce DMAC when offloading incoming tunneled flows.
357 * Flow counters require a match on the DMAC.
359 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4
, headers_c
, dmac_47_16
);
360 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4
, headers_c
, dmac_15_0
);
361 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_v
,
362 dmac_47_16
), priv
->netdev
->dev_addr
);
364 /* let software handle IP fragments */
365 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
, frag
, 1);
366 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, frag
, 0);
371 static int __parse_cls_flower(struct mlx5e_priv
*priv
,
372 struct mlx5_flow_spec
*spec
,
373 struct tc_cls_flower_offload
*f
,
376 void *headers_c
= MLX5_ADDR_OF(fte_match_param
, spec
->match_criteria
,
378 void *headers_v
= MLX5_ADDR_OF(fte_match_param
, spec
->match_value
,
383 *min_inline
= MLX5_INLINE_MODE_L2
;
385 if (f
->dissector
->used_keys
&
386 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL
) |
387 BIT(FLOW_DISSECTOR_KEY_BASIC
) |
388 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS
) |
389 BIT(FLOW_DISSECTOR_KEY_VLAN
) |
390 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS
) |
391 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS
) |
392 BIT(FLOW_DISSECTOR_KEY_PORTS
) |
393 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID
) |
394 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS
) |
395 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS
) |
396 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS
) |
397 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL
))) {
398 netdev_warn(priv
->netdev
, "Unsupported key used: 0x%x\n",
399 f
->dissector
->used_keys
);
403 if ((dissector_uses_key(f
->dissector
,
404 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS
) ||
405 dissector_uses_key(f
->dissector
, FLOW_DISSECTOR_KEY_ENC_KEYID
) ||
406 dissector_uses_key(f
->dissector
, FLOW_DISSECTOR_KEY_ENC_PORTS
)) &&
407 dissector_uses_key(f
->dissector
, FLOW_DISSECTOR_KEY_ENC_CONTROL
)) {
408 struct flow_dissector_key_control
*key
=
409 skb_flow_dissector_target(f
->dissector
,
410 FLOW_DISSECTOR_KEY_ENC_CONTROL
,
412 switch (key
->addr_type
) {
413 case FLOW_DISSECTOR_KEY_IPV4_ADDRS
:
414 case FLOW_DISSECTOR_KEY_IPV6_ADDRS
:
415 if (parse_tunnel_attr(priv
, spec
, f
))
422 /* In decap flow, header pointers should point to the inner
423 * headers, outer header were already set by parse_tunnel_attr
425 headers_c
= MLX5_ADDR_OF(fte_match_param
, spec
->match_criteria
,
427 headers_v
= MLX5_ADDR_OF(fte_match_param
, spec
->match_value
,
431 if (dissector_uses_key(f
->dissector
, FLOW_DISSECTOR_KEY_CONTROL
)) {
432 struct flow_dissector_key_control
*key
=
433 skb_flow_dissector_target(f
->dissector
,
434 FLOW_DISSECTOR_KEY_CONTROL
,
437 struct flow_dissector_key_control
*mask
=
438 skb_flow_dissector_target(f
->dissector
,
439 FLOW_DISSECTOR_KEY_CONTROL
,
441 addr_type
= key
->addr_type
;
443 if (mask
->flags
& FLOW_DIS_IS_FRAGMENT
) {
444 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
, frag
, 1);
445 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, frag
,
446 key
->flags
& FLOW_DIS_IS_FRAGMENT
);
448 /* the HW doesn't need L3 inline to match on frag=no */
449 if (key
->flags
& FLOW_DIS_IS_FRAGMENT
)
450 *min_inline
= MLX5_INLINE_MODE_IP
;
454 if (dissector_uses_key(f
->dissector
, FLOW_DISSECTOR_KEY_BASIC
)) {
455 struct flow_dissector_key_basic
*key
=
456 skb_flow_dissector_target(f
->dissector
,
457 FLOW_DISSECTOR_KEY_BASIC
,
459 struct flow_dissector_key_basic
*mask
=
460 skb_flow_dissector_target(f
->dissector
,
461 FLOW_DISSECTOR_KEY_BASIC
,
463 ip_proto
= key
->ip_proto
;
465 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
, ethertype
,
466 ntohs(mask
->n_proto
));
467 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, ethertype
,
468 ntohs(key
->n_proto
));
470 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
, ip_protocol
,
472 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, ip_protocol
,
476 *min_inline
= MLX5_INLINE_MODE_IP
;
479 if (dissector_uses_key(f
->dissector
, FLOW_DISSECTOR_KEY_ETH_ADDRS
)) {
480 struct flow_dissector_key_eth_addrs
*key
=
481 skb_flow_dissector_target(f
->dissector
,
482 FLOW_DISSECTOR_KEY_ETH_ADDRS
,
484 struct flow_dissector_key_eth_addrs
*mask
=
485 skb_flow_dissector_target(f
->dissector
,
486 FLOW_DISSECTOR_KEY_ETH_ADDRS
,
489 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_c
,
492 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_v
,
496 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_c
,
499 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_v
,
504 if (dissector_uses_key(f
->dissector
, FLOW_DISSECTOR_KEY_VLAN
)) {
505 struct flow_dissector_key_vlan
*key
=
506 skb_flow_dissector_target(f
->dissector
,
507 FLOW_DISSECTOR_KEY_VLAN
,
509 struct flow_dissector_key_vlan
*mask
=
510 skb_flow_dissector_target(f
->dissector
,
511 FLOW_DISSECTOR_KEY_VLAN
,
513 if (mask
->vlan_id
|| mask
->vlan_priority
) {
514 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
, cvlan_tag
, 1);
515 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, cvlan_tag
, 1);
517 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
, first_vid
, mask
->vlan_id
);
518 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, first_vid
, key
->vlan_id
);
520 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
, first_prio
, mask
->vlan_priority
);
521 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
, first_prio
, key
->vlan_priority
);
525 if (addr_type
== FLOW_DISSECTOR_KEY_IPV4_ADDRS
) {
526 struct flow_dissector_key_ipv4_addrs
*key
=
527 skb_flow_dissector_target(f
->dissector
,
528 FLOW_DISSECTOR_KEY_IPV4_ADDRS
,
530 struct flow_dissector_key_ipv4_addrs
*mask
=
531 skb_flow_dissector_target(f
->dissector
,
532 FLOW_DISSECTOR_KEY_IPV4_ADDRS
,
535 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_c
,
536 src_ipv4_src_ipv6
.ipv4_layout
.ipv4
),
537 &mask
->src
, sizeof(mask
->src
));
538 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_v
,
539 src_ipv4_src_ipv6
.ipv4_layout
.ipv4
),
540 &key
->src
, sizeof(key
->src
));
541 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_c
,
542 dst_ipv4_dst_ipv6
.ipv4_layout
.ipv4
),
543 &mask
->dst
, sizeof(mask
->dst
));
544 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_v
,
545 dst_ipv4_dst_ipv6
.ipv4_layout
.ipv4
),
546 &key
->dst
, sizeof(key
->dst
));
548 if (mask
->src
|| mask
->dst
)
549 *min_inline
= MLX5_INLINE_MODE_IP
;
552 if (addr_type
== FLOW_DISSECTOR_KEY_IPV6_ADDRS
) {
553 struct flow_dissector_key_ipv6_addrs
*key
=
554 skb_flow_dissector_target(f
->dissector
,
555 FLOW_DISSECTOR_KEY_IPV6_ADDRS
,
557 struct flow_dissector_key_ipv6_addrs
*mask
=
558 skb_flow_dissector_target(f
->dissector
,
559 FLOW_DISSECTOR_KEY_IPV6_ADDRS
,
562 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_c
,
563 src_ipv4_src_ipv6
.ipv6_layout
.ipv6
),
564 &mask
->src
, sizeof(mask
->src
));
565 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_v
,
566 src_ipv4_src_ipv6
.ipv6_layout
.ipv6
),
567 &key
->src
, sizeof(key
->src
));
569 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_c
,
570 dst_ipv4_dst_ipv6
.ipv6_layout
.ipv6
),
571 &mask
->dst
, sizeof(mask
->dst
));
572 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, headers_v
,
573 dst_ipv4_dst_ipv6
.ipv6_layout
.ipv6
),
574 &key
->dst
, sizeof(key
->dst
));
576 if (ipv6_addr_type(&mask
->src
) != IPV6_ADDR_ANY
||
577 ipv6_addr_type(&mask
->dst
) != IPV6_ADDR_ANY
)
578 *min_inline
= MLX5_INLINE_MODE_IP
;
581 if (dissector_uses_key(f
->dissector
, FLOW_DISSECTOR_KEY_PORTS
)) {
582 struct flow_dissector_key_ports
*key
=
583 skb_flow_dissector_target(f
->dissector
,
584 FLOW_DISSECTOR_KEY_PORTS
,
586 struct flow_dissector_key_ports
*mask
=
587 skb_flow_dissector_target(f
->dissector
,
588 FLOW_DISSECTOR_KEY_PORTS
,
592 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
,
593 tcp_sport
, ntohs(mask
->src
));
594 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
,
595 tcp_sport
, ntohs(key
->src
));
597 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
,
598 tcp_dport
, ntohs(mask
->dst
));
599 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
,
600 tcp_dport
, ntohs(key
->dst
));
604 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
,
605 udp_sport
, ntohs(mask
->src
));
606 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
,
607 udp_sport
, ntohs(key
->src
));
609 MLX5_SET(fte_match_set_lyr_2_4
, headers_c
,
610 udp_dport
, ntohs(mask
->dst
));
611 MLX5_SET(fte_match_set_lyr_2_4
, headers_v
,
612 udp_dport
, ntohs(key
->dst
));
615 netdev_err(priv
->netdev
,
616 "Only UDP and TCP transport are supported\n");
620 if (mask
->src
|| mask
->dst
)
621 *min_inline
= MLX5_INLINE_MODE_TCP_UDP
;
627 static int parse_cls_flower(struct mlx5e_priv
*priv
,
628 struct mlx5e_tc_flow
*flow
,
629 struct mlx5_flow_spec
*spec
,
630 struct tc_cls_flower_offload
*f
)
632 struct mlx5_core_dev
*dev
= priv
->mdev
;
633 struct mlx5_eswitch
*esw
= dev
->priv
.eswitch
;
634 struct mlx5_eswitch_rep
*rep
= priv
->ppriv
;
638 err
= __parse_cls_flower(priv
, spec
, f
, &min_inline
);
640 if (!err
&& (flow
->flags
& MLX5E_TC_FLOW_ESWITCH
) &&
641 rep
->vport
!= FDB_UPLINK_VPORT
) {
642 if (esw
->offloads
.inline_mode
!= MLX5_INLINE_MODE_NONE
&&
643 esw
->offloads
.inline_mode
< min_inline
) {
644 netdev_warn(priv
->netdev
,
645 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
646 min_inline
, esw
->offloads
.inline_mode
);
654 static int parse_tc_nic_actions(struct mlx5e_priv
*priv
, struct tcf_exts
*exts
,
655 u32
*action
, u32
*flow_tag
)
657 const struct tc_action
*a
;
660 if (tc_no_actions(exts
))
663 *flow_tag
= MLX5_FS_DEFAULT_FLOW_TAG
;
666 tcf_exts_to_list(exts
, &actions
);
667 list_for_each_entry(a
, &actions
, list
) {
668 /* Only support a single action per rule */
672 if (is_tcf_gact_shot(a
)) {
673 *action
|= MLX5_FLOW_CONTEXT_ACTION_DROP
;
674 if (MLX5_CAP_FLOWTABLE(priv
->mdev
,
675 flow_table_properties_nic_receive
.flow_counter
))
676 *action
|= MLX5_FLOW_CONTEXT_ACTION_COUNT
;
680 if (is_tcf_skbedit_mark(a
)) {
681 u32 mark
= tcf_skbedit_mark(a
);
683 if (mark
& ~MLX5E_TC_FLOW_ID_MASK
) {
684 netdev_warn(priv
->netdev
, "Bad flow mark - only 16 bit is supported: 0x%x\n",
690 *action
|= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
;
700 static inline int cmp_encap_info(struct ip_tunnel_key
*a
,
701 struct ip_tunnel_key
*b
)
703 return memcmp(a
, b
, sizeof(*a
));
706 static inline int hash_encap_info(struct ip_tunnel_key
*key
)
708 return jhash(key
, sizeof(*key
), 0);
711 static int mlx5e_route_lookup_ipv4(struct mlx5e_priv
*priv
,
712 struct net_device
*mirred_dev
,
713 struct net_device
**out_dev
,
715 struct neighbour
**out_n
,
718 struct mlx5_eswitch
*esw
= priv
->mdev
->priv
.eswitch
;
720 struct neighbour
*n
= NULL
;
722 #if IS_ENABLED(CONFIG_INET)
725 rt
= ip_route_output_key(dev_net(mirred_dev
), fl4
);
726 ret
= PTR_ERR_OR_ZERO(rt
);
732 /* if the egress device isn't on the same HW e-switch, we use the uplink */
733 if (!switchdev_port_same_parent_id(priv
->netdev
, rt
->dst
.dev
))
734 *out_dev
= mlx5_eswitch_get_uplink_netdev(esw
);
736 *out_dev
= rt
->dst
.dev
;
738 *out_ttl
= ip4_dst_hoplimit(&rt
->dst
);
739 n
= dst_neigh_lookup(&rt
->dst
, &fl4
->daddr
);
748 static int mlx5e_route_lookup_ipv6(struct mlx5e_priv
*priv
,
749 struct net_device
*mirred_dev
,
750 struct net_device
**out_dev
,
752 struct neighbour
**out_n
,
755 struct neighbour
*n
= NULL
;
756 struct dst_entry
*dst
;
758 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
759 struct mlx5_eswitch
*esw
= priv
->mdev
->priv
.eswitch
;
762 dst
= ip6_route_output(dev_net(mirred_dev
), NULL
, fl6
);
769 *out_ttl
= ip6_dst_hoplimit(dst
);
771 /* if the egress device isn't on the same HW e-switch, we use the uplink */
772 if (!switchdev_port_same_parent_id(priv
->netdev
, dst
->dev
))
773 *out_dev
= mlx5_eswitch_get_uplink_netdev(esw
);
780 n
= dst_neigh_lookup(dst
, &fl6
->daddr
);
789 static void gen_vxlan_header_ipv4(struct net_device
*out_dev
,
790 char buf
[], int encap_size
,
791 unsigned char h_dest
[ETH_ALEN
],
798 struct ethhdr
*eth
= (struct ethhdr
*)buf
;
799 struct iphdr
*ip
= (struct iphdr
*)((char *)eth
+ sizeof(struct ethhdr
));
800 struct udphdr
*udp
= (struct udphdr
*)((char *)ip
+ sizeof(struct iphdr
));
801 struct vxlanhdr
*vxh
= (struct vxlanhdr
*)((char *)udp
+ sizeof(struct udphdr
));
803 memset(buf
, 0, encap_size
);
805 ether_addr_copy(eth
->h_dest
, h_dest
);
806 ether_addr_copy(eth
->h_source
, out_dev
->dev_addr
);
807 eth
->h_proto
= htons(ETH_P_IP
);
813 ip
->protocol
= IPPROTO_UDP
;
817 udp
->dest
= udp_dst_port
;
818 vxh
->vx_flags
= VXLAN_HF_VNI
;
819 vxh
->vx_vni
= vxlan_vni_field(vx_vni
);
822 static int gen_vxlan_header_ipv6(struct net_device
*out_dev
,
824 unsigned char h_dest
[ETH_ALEN
],
826 struct in6_addr
*daddr
,
827 struct in6_addr
*saddr
,
831 int encap_size
= VXLAN_HLEN
+ sizeof(struct ipv6hdr
) + ETH_HLEN
;
832 struct ethhdr
*eth
= (struct ethhdr
*)buf
;
833 struct ipv6hdr
*ip6h
= (struct ipv6hdr
*)((char *)eth
+ sizeof(struct ethhdr
));
834 struct udphdr
*udp
= (struct udphdr
*)((char *)ip6h
+ sizeof(struct ipv6hdr
));
835 struct vxlanhdr
*vxh
= (struct vxlanhdr
*)((char *)udp
+ sizeof(struct udphdr
));
837 memset(buf
, 0, encap_size
);
839 ether_addr_copy(eth
->h_dest
, h_dest
);
840 ether_addr_copy(eth
->h_source
, out_dev
->dev_addr
);
841 eth
->h_proto
= htons(ETH_P_IPV6
);
843 ip6_flow_hdr(ip6h
, 0, 0);
844 /* the HW fills up ipv6 payload len */
845 ip6h
->nexthdr
= IPPROTO_UDP
;
846 ip6h
->hop_limit
= ttl
;
847 ip6h
->daddr
= *daddr
;
848 ip6h
->saddr
= *saddr
;
850 udp
->dest
= udp_dst_port
;
851 vxh
->vx_flags
= VXLAN_HF_VNI
;
852 vxh
->vx_vni
= vxlan_vni_field(vx_vni
);
857 static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv
*priv
,
858 struct net_device
*mirred_dev
,
859 struct mlx5_encap_entry
*e
,
860 struct net_device
**out_dev
)
862 int max_encap_size
= MLX5_CAP_ESW(priv
->mdev
, max_encap_header_size
);
863 int ipv4_encap_size
= ETH_HLEN
+ sizeof(struct iphdr
) + VXLAN_HLEN
;
864 struct ip_tunnel_key
*tun_key
= &e
->tun_info
.key
;
865 struct neighbour
*n
= NULL
;
866 struct flowi4 fl4
= {};
870 if (max_encap_size
< ipv4_encap_size
) {
871 mlx5_core_warn(priv
->mdev
, "encap size %d too big, max supported is %d\n",
872 ipv4_encap_size
, max_encap_size
);
876 encap_header
= kzalloc(ipv4_encap_size
, GFP_KERNEL
);
880 switch (e
->tunnel_type
) {
881 case MLX5_HEADER_TYPE_VXLAN
:
882 fl4
.flowi4_proto
= IPPROTO_UDP
;
883 fl4
.fl4_dport
= tun_key
->tp_dst
;
889 fl4
.flowi4_tos
= tun_key
->tos
;
890 fl4
.daddr
= tun_key
->u
.ipv4
.dst
;
891 fl4
.saddr
= tun_key
->u
.ipv4
.src
;
893 err
= mlx5e_route_lookup_ipv4(priv
, mirred_dev
, out_dev
,
898 if (!(n
->nud_state
& NUD_VALID
)) {
899 pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__
, &fl4
.daddr
);
905 e
->out_dev
= *out_dev
;
907 neigh_ha_snapshot(e
->h_dest
, n
, *out_dev
);
909 switch (e
->tunnel_type
) {
910 case MLX5_HEADER_TYPE_VXLAN
:
911 gen_vxlan_header_ipv4(*out_dev
, encap_header
,
912 ipv4_encap_size
, e
->h_dest
, ttl
,
914 fl4
.saddr
, tun_key
->tp_dst
,
915 tunnel_id_to_key32(tun_key
->tun_id
));
922 err
= mlx5_encap_alloc(priv
->mdev
, e
->tunnel_type
,
923 ipv4_encap_size
, encap_header
, &e
->encap_id
);
931 static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv
*priv
,
932 struct net_device
*mirred_dev
,
933 struct mlx5_encap_entry
*e
,
934 struct net_device
**out_dev
)
937 int max_encap_size
= MLX5_CAP_ESW(priv
->mdev
, max_encap_header_size
);
938 struct ip_tunnel_key
*tun_key
= &e
->tun_info
.key
;
939 int encap_size
, err
, ttl
= 0;
940 struct neighbour
*n
= NULL
;
941 struct flowi6 fl6
= {};
944 encap_header
= kzalloc(max_encap_size
, GFP_KERNEL
);
948 switch (e
->tunnel_type
) {
949 case MLX5_HEADER_TYPE_VXLAN
:
950 fl6
.flowi6_proto
= IPPROTO_UDP
;
951 fl6
.fl6_dport
= tun_key
->tp_dst
;
958 fl6
.flowlabel
= ip6_make_flowinfo(RT_TOS(tun_key
->tos
), tun_key
->label
);
959 fl6
.daddr
= tun_key
->u
.ipv6
.dst
;
960 fl6
.saddr
= tun_key
->u
.ipv6
.src
;
962 err
= mlx5e_route_lookup_ipv6(priv
, mirred_dev
, out_dev
,
967 if (!(n
->nud_state
& NUD_VALID
)) {
968 pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__
, &fl6
.daddr
);
974 e
->out_dev
= *out_dev
;
976 neigh_ha_snapshot(e
->h_dest
, n
, *out_dev
);
978 switch (e
->tunnel_type
) {
979 case MLX5_HEADER_TYPE_VXLAN
:
980 encap_size
= gen_vxlan_header_ipv6(*out_dev
, encap_header
,
983 &fl6
.saddr
, tun_key
->tp_dst
,
984 tunnel_id_to_key32(tun_key
->tun_id
));
991 err
= mlx5_encap_alloc(priv
->mdev
, e
->tunnel_type
,
992 encap_size
, encap_header
, &e
->encap_id
);
1000 static int mlx5e_attach_encap(struct mlx5e_priv
*priv
,
1001 struct ip_tunnel_info
*tun_info
,
1002 struct net_device
*mirred_dev
,
1003 struct mlx5_esw_flow_attr
*attr
)
1005 struct mlx5_eswitch
*esw
= priv
->mdev
->priv
.eswitch
;
1006 struct net_device
*up_dev
= mlx5_eswitch_get_uplink_netdev(esw
);
1007 struct mlx5e_priv
*up_priv
= netdev_priv(up_dev
);
1008 unsigned short family
= ip_tunnel_info_af(tun_info
);
1009 struct ip_tunnel_key
*key
= &tun_info
->key
;
1010 struct mlx5_encap_entry
*e
;
1011 struct net_device
*out_dev
;
1012 int tunnel_type
, err
= -EOPNOTSUPP
;
1016 /* udp dst port must be set */
1017 if (!memchr_inv(&key
->tp_dst
, 0, sizeof(key
->tp_dst
)))
1018 goto vxlan_encap_offload_err
;
1020 /* setting udp src port isn't supported */
1021 if (memchr_inv(&key
->tp_src
, 0, sizeof(key
->tp_src
))) {
1022 vxlan_encap_offload_err
:
1023 netdev_warn(priv
->netdev
,
1024 "must set udp dst port and not set udp src port\n");
1028 if (mlx5e_vxlan_lookup_port(up_priv
, be16_to_cpu(key
->tp_dst
)) &&
1029 MLX5_CAP_ESW(priv
->mdev
, vxlan_encap_decap
)) {
1030 tunnel_type
= MLX5_HEADER_TYPE_VXLAN
;
1032 netdev_warn(priv
->netdev
,
1033 "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key
->tp_dst
));
1037 hash_key
= hash_encap_info(key
);
1039 hash_for_each_possible_rcu(esw
->offloads
.encap_tbl
, e
,
1040 encap_hlist
, hash_key
) {
1041 if (!cmp_encap_info(&e
->tun_info
.key
, key
)) {
1052 e
= kzalloc(sizeof(*e
), GFP_KERNEL
);
1056 e
->tun_info
= *tun_info
;
1057 e
->tunnel_type
= tunnel_type
;
1058 INIT_LIST_HEAD(&e
->flows
);
1060 if (family
== AF_INET
)
1061 err
= mlx5e_create_encap_header_ipv4(priv
, mirred_dev
, e
, &out_dev
);
1062 else if (family
== AF_INET6
)
1063 err
= mlx5e_create_encap_header_ipv6(priv
, mirred_dev
, e
, &out_dev
);
1069 hash_add_rcu(esw
->offloads
.encap_tbl
, &e
->encap_hlist
, hash_key
);
1078 static int parse_tc_fdb_actions(struct mlx5e_priv
*priv
, struct tcf_exts
*exts
,
1079 struct mlx5e_tc_flow
*flow
)
1081 struct mlx5_esw_flow_attr
*attr
= flow
->attr
;
1082 struct ip_tunnel_info
*info
= NULL
;
1083 const struct tc_action
*a
;
1088 if (tc_no_actions(exts
))
1091 memset(attr
, 0, sizeof(*attr
));
1092 attr
->in_rep
= priv
->ppriv
;
1094 tcf_exts_to_list(exts
, &actions
);
1095 list_for_each_entry(a
, &actions
, list
) {
1096 if (is_tcf_gact_shot(a
)) {
1097 attr
->action
|= MLX5_FLOW_CONTEXT_ACTION_DROP
|
1098 MLX5_FLOW_CONTEXT_ACTION_COUNT
;
1102 if (is_tcf_mirred_egress_redirect(a
)) {
1103 int ifindex
= tcf_mirred_ifindex(a
);
1104 struct net_device
*out_dev
;
1105 struct mlx5e_priv
*out_priv
;
1107 out_dev
= __dev_get_by_index(dev_net(priv
->netdev
), ifindex
);
1109 if (switchdev_port_same_parent_id(priv
->netdev
,
1111 attr
->action
|= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
|
1112 MLX5_FLOW_CONTEXT_ACTION_COUNT
;
1113 out_priv
= netdev_priv(out_dev
);
1114 attr
->out_rep
= out_priv
->ppriv
;
1116 err
= mlx5e_attach_encap(priv
, info
,
1120 list_add(&flow
->encap
, &attr
->encap
->flows
);
1121 attr
->action
|= MLX5_FLOW_CONTEXT_ACTION_ENCAP
|
1122 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
|
1123 MLX5_FLOW_CONTEXT_ACTION_COUNT
;
1124 out_priv
= netdev_priv(attr
->encap
->out_dev
);
1125 attr
->out_rep
= out_priv
->ppriv
;
1127 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
1128 priv
->netdev
->name
, out_dev
->name
);
1134 if (is_tcf_tunnel_set(a
)) {
1135 info
= tcf_tunnel_info(a
);
1143 if (is_tcf_vlan(a
)) {
1144 if (tcf_vlan_action(a
) == TCA_VLAN_ACT_POP
) {
1145 attr
->action
|= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP
;
1146 } else if (tcf_vlan_action(a
) == TCA_VLAN_ACT_PUSH
) {
1147 if (tcf_vlan_push_proto(a
) != htons(ETH_P_8021Q
))
1150 attr
->action
|= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH
;
1151 attr
->vlan
= tcf_vlan_push_vid(a
);
1152 } else { /* action is TCA_VLAN_ACT_MODIFY */
1158 if (is_tcf_tunnel_release(a
)) {
1159 attr
->action
|= MLX5_FLOW_CONTEXT_ACTION_DECAP
;
1168 int mlx5e_configure_flower(struct mlx5e_priv
*priv
, __be16 protocol
,
1169 struct tc_cls_flower_offload
*f
)
1171 struct mlx5e_tc_table
*tc
= &priv
->fs
.tc
;
1172 int err
, attr_size
= 0;
1173 u32 flow_tag
, action
;
1174 struct mlx5e_tc_flow
*flow
;
1175 struct mlx5_flow_spec
*spec
;
1176 struct mlx5_eswitch
*esw
= priv
->mdev
->priv
.eswitch
;
1179 if (esw
&& esw
->mode
== SRIOV_OFFLOADS
) {
1180 flow_flags
= MLX5E_TC_FLOW_ESWITCH
;
1181 attr_size
= sizeof(struct mlx5_esw_flow_attr
);
1184 flow
= kzalloc(sizeof(*flow
) + attr_size
, GFP_KERNEL
);
1185 spec
= mlx5_vzalloc(sizeof(*spec
));
1186 if (!spec
|| !flow
) {
1191 flow
->cookie
= f
->cookie
;
1192 flow
->flags
= flow_flags
;
1194 err
= parse_cls_flower(priv
, flow
, spec
, f
);
1198 if (flow
->flags
& MLX5E_TC_FLOW_ESWITCH
) {
1199 flow
->attr
= (struct mlx5_esw_flow_attr
*)(flow
+ 1);
1200 err
= parse_tc_fdb_actions(priv
, f
->exts
, flow
);
1203 flow
->rule
= mlx5e_tc_add_fdb_flow(priv
, spec
, flow
->attr
);
1205 err
= parse_tc_nic_actions(priv
, f
->exts
, &action
, &flow_tag
);
1208 flow
->rule
= mlx5e_tc_add_nic_flow(priv
, spec
, action
, flow_tag
);
1211 if (IS_ERR(flow
->rule
)) {
1212 err
= PTR_ERR(flow
->rule
);
1216 err
= rhashtable_insert_fast(&tc
->ht
, &flow
->node
,
1224 mlx5e_tc_del_flow(priv
, flow
);
1233 int mlx5e_delete_flower(struct mlx5e_priv
*priv
,
1234 struct tc_cls_flower_offload
*f
)
1236 struct mlx5e_tc_flow
*flow
;
1237 struct mlx5e_tc_table
*tc
= &priv
->fs
.tc
;
1239 flow
= rhashtable_lookup_fast(&tc
->ht
, &f
->cookie
,
1244 rhashtable_remove_fast(&tc
->ht
, &flow
->node
, tc
->ht_params
);
1246 mlx5e_tc_del_flow(priv
, flow
);
1254 int mlx5e_stats_flower(struct mlx5e_priv
*priv
,
1255 struct tc_cls_flower_offload
*f
)
1257 struct mlx5e_tc_table
*tc
= &priv
->fs
.tc
;
1258 struct mlx5e_tc_flow
*flow
;
1259 struct tc_action
*a
;
1260 struct mlx5_fc
*counter
;
1266 flow
= rhashtable_lookup_fast(&tc
->ht
, &f
->cookie
,
1271 counter
= mlx5_flow_rule_counter(flow
->rule
);
1275 mlx5_fc_query_cached(counter
, &bytes
, &packets
, &lastuse
);
1279 tcf_exts_to_list(f
->exts
, &actions
);
1280 list_for_each_entry(a
, &actions
, list
)
1281 tcf_action_stats_update(a
, bytes
, packets
, lastuse
);
1288 static const struct rhashtable_params mlx5e_tc_flow_ht_params
= {
1289 .head_offset
= offsetof(struct mlx5e_tc_flow
, node
),
1290 .key_offset
= offsetof(struct mlx5e_tc_flow
, cookie
),
1291 .key_len
= sizeof(((struct mlx5e_tc_flow
*)0)->cookie
),
1292 .automatic_shrinking
= true,
1295 int mlx5e_tc_init(struct mlx5e_priv
*priv
)
1297 struct mlx5e_tc_table
*tc
= &priv
->fs
.tc
;
1299 tc
->ht_params
= mlx5e_tc_flow_ht_params
;
1300 return rhashtable_init(&tc
->ht
, &tc
->ht_params
);
/* rhashtable_free_and_destroy() callback: tear down one offloaded flow.
 * 'ptr' is the struct mlx5e_tc_flow entry, 'arg' is the owning priv.
 */
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_priv *priv = arg;
	struct mlx5e_tc_flow *flow = ptr;

	mlx5e_tc_del_flow(priv, flow);
}
1312 void mlx5e_tc_cleanup(struct mlx5e_priv
*priv
)
1314 struct mlx5e_tc_table
*tc
= &priv
->fs
.tc
;
1316 rhashtable_free_and_destroy(&tc
->ht
, _mlx5e_tc_del_flow
, priv
);
1318 if (!IS_ERR_OR_NULL(tc
->t
)) {
1319 mlx5_destroy_flow_table(tc
->t
);