/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"
enum {
	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
};
struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule;
	struct list_head	encap; /* flows sharing the same encap */
	struct mlx5_esw_flow_attr *attr;
};
enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};
#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
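
/* Add a flower rule in NIC (non-eswitch) mode: the rule goes into a
 * lazily created, auto-grouped flow table on the device RX path. The
 * table is created on first use and torn down again once the last
 * offloaded filter is deleted.
 */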
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_act flow_act = {
		.action = action,
		.flow_tag = flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_fc *counter = NULL;

	if (!IS_ERR(flow->rule)) {
		counter = mlx5_flow_rule_counter(flow->rule);
		mlx5_del_flow_rules(flow->rule);
		mlx5_fc_destroy(priv->mdev, counter);
	}

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}
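
/* Eswitch (switchdev SRIOV offloads) mode: the rule is programmed into
 * the hardware FDB rather than the NIC RX tables. Any vlan push/pop the
 * actions require is set up first, then the offloaded rule is added.
 */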
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return ERR_PTR(err);

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr);

	mlx5_eswitch_del_vlan_action(esw, flow->attr);

	if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);
}
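
/* Encap entries are shared: each flow using an entry is linked on the
 * entry's flows list. Detaching the last flow releases the hardware
 * encap id and frees the entry itself.
 */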
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5_encap_entry *e;

		e = list_entry(next, struct mlx5_encap_entry, flows);
		if (e->n) {
			mlx5_encap_dealloc(priv->mdev, e->encap_id);
			neigh_release(e->n);
		}
		hlist_del_rcu(&e->encap_hlist);
		kfree(e);
	}
}
/* we get here also when setting rule to the FW failed, etc. It means that the
 * flow rule itself might not exist, but some offloading related to the actions
 * should be cleaned.
 */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}
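
/* Translate the flower tunnel-key match into device match fields:
 * constrain the outer header to UDP and, when the filter matches on a
 * tunnel key id, match the VXLAN VNI in the misc parameters.
 */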
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}
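
/* Build the outer-header match for a decap flow. Only VXLAN over a UDP
 * dst port that was configured for offload is accepted; anything else
 * is left to software.
 */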
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);
		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
		struct mlx5e_priv *up_priv = netdev_priv(up_dev);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}
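
/* Translate a flower match into an mlx5 flow spec and compute the
 * minimal inline mode the match requires from the vport. Any dissector
 * key outside the supported set makes the filter non-offloadable.
 */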
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer header were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_eswitch_rep *rep = priv->ppriv;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
	    rep->vport != FDB_UPLINK_VPORT) {
		if (min_inline > esw->offloads.inline_mode) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}
*priv
, struct tcf_exts
*exts
,
654 u32
*action
, u32
*flow_tag
)
656 const struct tc_action
*a
;
659 if (tc_no_actions(exts
))
662 *flow_tag
= MLX5_FS_DEFAULT_FLOW_TAG
;
665 tcf_exts_to_list(exts
, &actions
);
666 list_for_each_entry(a
, &actions
, list
) {
667 /* Only support a single action per rule */
671 if (is_tcf_gact_shot(a
)) {
672 *action
|= MLX5_FLOW_CONTEXT_ACTION_DROP
;
673 if (MLX5_CAP_FLOWTABLE(priv
->mdev
,
674 flow_table_properties_nic_receive
.flow_counter
))
675 *action
|= MLX5_FLOW_CONTEXT_ACTION_COUNT
;
679 if (is_tcf_skbedit_mark(a
)) {
680 u32 mark
= tcf_skbedit_mark(a
);
682 if (mark
& ~MLX5E_TC_FLOW_ID_MASK
) {
683 netdev_warn(priv
->netdev
, "Bad flow mark - only 16 bit is supported: 0x%x\n",
689 *action
|= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
;
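
/* Encap entries are deduplicated by hashing the full ip_tunnel_key, so
 * flows whose tunnel_set actions are identical share one hardware
 * encap id.
 */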
static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}
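
/* Resolve the route and neighbour for the tunnel destination. If the
 * egress device the route points at is not on the same HW e-switch,
 * the encapsulated traffic is sent through the uplink device instead.
 */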
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
	ret = dst->error;
	if (ret) {
		dst_release(dst);
		return ret;
	}

	*out_ttl = ip6_dst_hoplimit(dst);

	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}
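
/* Build the static part of the encap header the hardware prepends on
 * transmit: Ethernet, then IPv4 (IPv6 in the variant below), then UDP,
 * then VXLAN. Returns the header size in bytes.
 */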
static int gen_vxlan_header_ipv4(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 __be32 daddr,
				 __be32 saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}
static int gen_vxlan_header_ipv6(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 struct in6_addr *daddr,
				 struct in6_addr *saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills up ipv6 payload len */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	int encap_size, ttl, err;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto out;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	e->n = n;
	e->out_dev = *out_dev;

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
						   e->h_dest, ttl,
						   fl4.daddr,
						   fl4.saddr, tun_key->tp_dst,
						   tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}
static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	int encap_size, err, ttl = 0;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto out;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	e->n = n;
	e->out_dev = *out_dev;

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
						   e->h_dest, ttl,
						   &fl6.daddr,
						   &fl6.saddr, tun_key->tp_dst,
						   tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}
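
/* Find or create the shared encap entry for a tunnel_set action. The
 * UDP dst port must be set (and be an offloaded VXLAN port), while the
 * UDP src port must be left for the device to choose.
 */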
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5_encap_entry *e;
	struct net_device *out_dev;
	int tunnel_type, err = -EOPNOTSUPP;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	if (found) {
		attr->encap = e;
		return 0;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev);

	if (err)
		goto out_err;

	attr->encap = e;
	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

	return err;

out_err:
	kfree(e);
	return err;
}
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->attr;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err = 0;

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = priv->ppriv;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				attr->out_rep = out_priv->ppriv;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, attr);
				if (err)
					return err;
				list_add(&flow->encap, &attr->encap->flows);
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(attr->encap->out_dev);
				attr->out_rep = out_priv->ppriv;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			} else { /* action is TCA_VLAN_ACT_MODIFY */
				return -EOPNOTSUPP;
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return err;
}
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err, attr_size = 0;
	u32 flow_tag, action;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	u8 flow_flags = 0;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags = MLX5E_TC_FLOW_ESWITCH;
		attr_size = sizeof(struct mlx5_esw_flow_attr);
	}

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;

	err = parse_cls_flower(priv, flow, spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
		err = parse_tc_fdb_actions(priv, f->exts, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_del_rule;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	goto out;

err_del_rule:
	mlx5e_tc_del_flow(priv, flow);

err_free:
	kfree(flow);
out:
	kvfree(spec);
	return err;
}
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	preempt_disable();

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	preempt_enable();

	return 0;
}
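
/* Offloaded flows are tracked in an rhashtable keyed by the flower
 * cookie, so add/del/stats requests can find the flow in O(1).
 */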
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}
void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}