/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"

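/*
 * TC flower classifier offload.
 *
 * Illustrative usage from user space (interface name is hypothetical):
 *
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 protocol ip parent ffff: \
 *		flower ip_proto tcp dst_port 80 action drop
 *
 * Each offloaded filter is tracked by the struct below: flows live in an
 * rhashtable keyed by the TC filter cookie, together with the hardware
 * rule that was installed for them.
 */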
struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	struct mlx5_flow_rule	*rule;
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

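/*
 * Install a flow rule in the NIC (non-eswitch) TC flow table, creating
 * the table on first use. A FWD_DEST action steers matching packets to
 * the vlan table; a COUNT-only action (drop) gets a dedicated flow
 * counter as its destination so statistics can still be read back.
 */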
static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
						    struct mlx5_flow_spec *spec,
						    u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_rule *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
				  action, flow_tag,
				  &dest);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

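/*
 * Install a flow rule in the eswitch FDB (SRIOV offloads mode). The
 * source vport is taken from the representor this netdev belongs to.
 */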
static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
						    struct mlx5_flow_spec *spec,
						    u32 action, u32 dst_vport)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_eswitch_rep *rep = priv->ppriv;
	u32 src_vport;

	src_vport = rep->vport;

	return mlx5_eswitch_add_offloaded_rule(esw, spec, action, src_vport, dst_vport);
}

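/*
 * Remove a hardware rule and release its flow counter, if any. The TC
 * flow table itself is torn down once the last offloaded filter is gone.
 */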
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5_flow_rule *rule)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);

	mlx5_del_flow_rule(rule);

	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}

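/*
 * Translate the flower match (dissector keys and masks) into an mlx5
 * flow spec: masks are written into match_criteria and values into
 * match_value. Filters using any dissector key beyond control, basic,
 * Ethernet addresses, IPv4/IPv6 addresses, or L4 ports are rejected
 * with -EOPNOTSUPP.
 */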
static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}
	}

	return 0;
}

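/*
 * Map TC actions onto NIC flow-context actions. Only a single TC action
 * per rule is supported: gact drop becomes DROP (plus COUNT when the
 * device exposes flow counters), and skbedit mark becomes FWD_DEST with
 * the mark carried in the flow tag (the tag is limited to 16 bits).
 */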
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				u32 *action, u32 *flow_tag)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	*action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			*flow_tag = mark;
			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

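/*
 * Map TC actions onto eswitch FDB actions. gact drop becomes
 * DROP | COUNT; mirred redirect becomes FWD_DEST to the vport of the
 * target representor, which must sit on the same switch hardware as
 * this netdev.
 */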
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				u32 *action, u32 *dest_vport)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	*action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action = MLX5_FLOW_CONTEXT_ACTION_DROP |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_mirred_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;
			struct mlx5_eswitch_rep *out_rep;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (!switchdev_port_same_parent_id(priv->netdev, out_dev)) {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}

			out_priv = netdev_priv(out_dev);
			out_rep = out_priv->ppriv;
			*dest_vport = out_rep->vport;
			*action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}
	return 0;
}

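/*
 * Add or replace an offloaded flower filter. The eswitch mode decides
 * where the rule lands: SRIOV offloads mode installs it in the FDB,
 * anything else in the NIC TC table. On replace, the old hardware rule
 * is deleted only after the new one has been installed successfully.
 */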
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err = 0;
	u32 flow_tag, action, dest_vport = 0;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_rule *old = NULL;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (flow)
		old = flow->rule;
	else
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;

	err = parse_cls_flower(priv, spec, f);
	if (err < 0)
		goto err_free;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		err = parse_tc_fdb_actions(priv, f->exts, &action, &dest_vport);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, action, dest_vport);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_free;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	if (old)
		mlx5e_tc_del_flow(priv, old);

	goto out;

err_del_rule:
	mlx5_del_flow_rule(flow->rule);

err_free:
	if (!old)
		kfree(flow);
out:
	kvfree(spec);
	return err;
}

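/*
 * Remove an offloaded flower filter: look the flow up by cookie, unlink
 * it from the hash table, and tear down the hardware rule.
 */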
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow->rule);

	kfree(flow);

	return 0;
}

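/*
 * Report hardware statistics for an offloaded filter. The cached
 * byte/packet counts from the rule's flow counter are pushed into every
 * TC action attached to the filter (e.g. for `tc -s filter show`).
 */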
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	return 0;
}

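/* Flows are hashed on the TC filter cookie. */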
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

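/* Set up the flow hash table; the hardware table is created on first use. */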
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow->rule);
	kfree(flow);
}

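/* Free every remaining flow and destroy the TC table, if it exists. */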
void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}