/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	struct mlx5_flow_handle *rule;
	struct list_head	encap; /* flows sharing the same encap */
	struct mlx5_esw_flow_attr *attr;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

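/* Offload a matched flow into the NIC RX steering tables: set up the
 * forward/count destination, lazily create the TC flow table on first
 * use, and install the rule. On failure the lazily created table and
 * any allocated counter are torn down again.
 */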
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_act flow_act = {
		.action = action,
		.flow_tag = flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

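/* Offload a matched flow through the e-switch FDB (SRIOV offloads mode):
 * apply any VLAN push/pop state first, then install the offloaded rule
 * against the supplied e-switch flow attributes.
 */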
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return ERR_PTR(err);

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

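/* Drop this flow's reference on its shared encap entry. When the last
 * flow using the entry goes away, release the hardware encap id and the
 * neighbour reference (only held when resolution succeeded) and free
 * the entry itself.
 */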
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5_encap_entry *e;

		e = list_entry(next, struct mlx5_encap_entry, flows);
		if (e->n) {
			mlx5_encap_dealloc(priv->mdev, e->encap_id);
			neigh_release(e->n);
		}
		hlist_del_rcu(&e->encap_hlist);
		kfree(e);
	}
}

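/* Tear down an offloaded flow: delete the steering rule, undo e-switch
 * VLAN/encap state when in SRIOV offloads mode, free the flow counter,
 * and drop the TC flow table once the last filter is gone.
 */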
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(flow->rule);

	mlx5_del_flow_rules(flow->rule);

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		mlx5_eswitch_del_vlan_action(esw, flow->attr);
		if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
			mlx5e_detach_encap(priv, flow);
	}

	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}

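/* Populate the outer-header and misc match fields for a VXLAN decap
 * match: force UDP as the IP protocol and, when the filter carries an
 * encapsulation key id, match it against the VXLAN VNI.
 */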
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

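/* Translate the tunnel (outer header) part of a flower filter into the
 * flow spec: require an exact UDP dst port that is a known VXLAN port,
 * then copy UDP ports and outer IPv4 addresses, pin the ethertype to
 * IPv4, enforce a DMAC match and leave IP fragments to software.
 */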
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));
	}

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

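/* Core flower -> mlx5 match translation. Rejects dissector keys the
 * hardware cannot match on, handles the tunnel/decap case by switching
 * to the inner headers, fills L2/VLAN/L3/L4 match fields, and reports
 * the minimum inline mode the resulting match requires.
 */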
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			netdev_warn(priv->netdev,
				    "IPv6 tunnel decap offload isn't supported\n");
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer header were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}

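/* Wrapper around __parse_cls_flower that additionally rejects flows
 * whose required minimum inline mode exceeds the e-switch inline mode
 * configured for SRIOV offloads on non-uplink (VF representor) vports.
 */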
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_eswitch_rep *rep = priv->ppriv;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && esw->mode == SRIOV_OFFLOADS &&
	    rep->vport != FDB_UPLINK_VPORT) {
		if (min_inline > esw->offloads.inline_mode) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

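/* Translate TC actions for the NIC (non-eswitch) path. Only a single
 * action per rule is accepted: gact drop (optionally counted) or
 * skbedit mark, which is programmed as the rule's flow tag.
 */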
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				u32 *action, u32 *flow_tag)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	*action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			*flow_tag = mark;
			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

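/* Encap entries are deduplicated by hashing the full mlx5_encap_info
 * key and comparing candidates bytewise within the hash bucket.
 */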
static inline int cmp_encap_info(struct mlx5_encap_info *a,
				 struct mlx5_encap_info *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct mlx5_encap_info *info)
{
	return jhash(info, sizeof(*info), 0);
}

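/* Resolve the IPv4 route and neighbour towards the tunnel destination.
 * The route's egress device must sit on the same HW e-switch as the
 * mlx5e device, otherwise the encap cannot be offloaded.
 */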
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   __be32 *saddr,
				   int *out_ttl)
{
	struct rtable *rt;
	struct neighbour *n = NULL;
	int ttl;

#if IS_ENABLED(CONFIG_INET)
	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);
#else
	return -EOPNOTSUPP;
#endif

	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
		pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
		ip_rt_put(rt);
		return -EOPNOTSUPP;
	}

	ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	/* record the egress device before the route reference is dropped */
	*out_dev = rt->dst.dev;
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	*saddr = fl4->saddr;
	*out_ttl = ttl;

	return 0;
}

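/* Build the static part of the VXLAN-over-IPv4 encapsulation header
 * (Ethernet + IPv4 + UDP + VXLAN) into buf and return its size; fields
 * such as the IP/UDP lengths and checksums are not filled in here.
 */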
static int gen_vxlan_header_ipv4(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 __be32 daddr,
				 __be32 saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}

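/* Resolve the route and neighbour for an IPv4 tunnel destination, build
 * the encapsulation header and register it with the device to obtain an
 * encap_id. On failure the neighbour reference taken during the route
 * lookup is dropped before returning.
 */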
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;
	int encap_size;
	__be32 saddr;
	int ttl;
	int err;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = e->tun_info.tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}
	fl4.daddr = e->tun_info.daddr;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
				      &fl4, &n, &saddr, &ttl);
	if (err)
		goto out;

	e->n = n;
	e->out_dev = *out_dev;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
						   e->h_dest, ttl,
						   e->tun_info.daddr,
						   saddr, e->tun_info.tp_dst,
						   e->tun_info.tun_id);
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}

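/* Attach a flow to an encap entry: validate the tunnel key (VXLAN over
 * a known UDP dst port, no fixed src port, IPv4 only), then reuse an
 * existing entry from the e-switch encap table or create and register a
 * new one.
 */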
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5_encap_info info;
	struct mlx5_encap_entry *e;
	struct net_device *out_dev;
	uintptr_t hash_key;
	bool found = false;
	int tunnel_type;
	int err;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		info.tp_dst = key->tp_dst;
		info.tun_id = tunnel_id_to_key32(key->tun_id);
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	switch (family) {
	case AF_INET:
		info.daddr = key->u.ipv4.dst;
		break;
	case AF_INET6:
		netdev_warn(priv->netdev,
			    "IPv6 tunnel encap offload isn't supported\n");
	default:
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(&info);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info, &info)) {
			found = true;
			break;
		}
	}

	if (found) {
		attr->encap = e;
		return 0;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
	if (err)
		goto out_err;

	attr->encap = e;
	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

	return err;

out_err:
	kfree(e);
	return err;
}

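/* Translate TC actions for the e-switch (FDB) path: drop, mirred
 * redirect to a representor on the same e-switch or via tunnel encap,
 * tunnel_key set/release (encap/decap) and 802.1Q VLAN push/pop.
 */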
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->attr;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = priv->ppriv;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				attr->out_rep = out_priv->ppriv;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, attr);
				if (err)
					return err;
				list_add(&flow->encap, &attr->encap->flows);
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
						MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(attr->encap->out_dev);
				attr->out_rep = out_priv->ppriv;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == VLAN_F_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return 0;
}

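/* Entry point for adding a flower classifier: parse the match and the
 * actions, install the rule on the NIC or FDB path, and track the flow
 * in the driver's hashtable keyed by the TC cookie.
 */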
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err = 0;
	bool fdb_flow = false;
	u32 flow_tag, action;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (esw && esw->mode == SRIOV_OFFLOADS)
		fdb_flow = true;

	if (fdb_flow)
		flow = kzalloc(sizeof(*flow) +
			       sizeof(struct mlx5_esw_flow_attr),
			       GFP_KERNEL);
	else
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;

	err = parse_cls_flower(priv, spec, f);
	if (err < 0)
		goto err_free;

	if (fdb_flow) {
		flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
		err = parse_tc_fdb_actions(priv, f->exts, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_free;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	goto out;

err_del_rule:
	mlx5_del_flow_rules(flow->rule);

err_free:
	kfree(flow);
out:
	kvfree(spec);
	return err;
}

int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

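/* Report hardware counters for an offloaded flow back to TC: read the
 * cached flow counter and propagate bytes/packets/lastuse into every
 * action attached to the classifier.
 */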
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	return 0;
}

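/* Flows are indexed by the TC cookie in an rhashtable; init sets up the
 * table and cleanup tears down any remaining flows together with the TC
 * flow table.
 */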
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}