/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

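/* Per-filter offload state.  Flows are indexed in the tc hashtable by the
 * TC filter cookie; attr is only used (and allocated) for eswitch/FDB flows.
 */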
struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	struct mlx5_flow_handle *rule;
	struct list_head	encap; /* flows sharing the same encap */
	struct mlx5_esw_flow_attr *attr;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

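/* Add a flow to the NIC RX flow table: lazily create the auto-grouped tc
 * table on first use, optionally allocate a flow counter, and insert a rule
 * matching on the outer headers with the given action and flow tag.
 */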
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_act flow_act = {
		.action = action,
		.flow_tag = flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

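/* Add a flow to the eswitch FDB (switchdev mode): apply any vlan push/pop
 * bookkeeping first, then offload the rule through the eswitch.
 */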
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return ERR_PTR(err);

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

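/* Unlink a flow from its encap entry; once the last flow using the entry is
 * gone, free the HW encap id, release the cached neighbour and free the entry.
 */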
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5_encap_entry *e;

		e = list_entry(next, struct mlx5_encap_entry, flows);
		if (e->n) {
			mlx5_encap_dealloc(priv->mdev, e->encap_id);
			neigh_release(e->n);
		}
		hlist_del_rcu(&e->encap_hlist);
		kfree(e);
	}
}

/* We also get here when setting the rule to the FW failed, etc.  The flow
 * rule itself might not exist, but some offloading related to the actions
 * should still be cleaned up.
 */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_fc *counter = NULL;

	if (!IS_ERR(flow->rule)) {
		counter = mlx5_flow_rule_counter(flow->rule);
		mlx5_del_flow_rules(flow->rule);
		mlx5_fc_destroy(priv->mdev, counter);
	}

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		mlx5_eswitch_del_vlan_action(esw, flow->attr);
		if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
			mlx5e_detach_encap(priv, flow);
	}

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}

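/* Match on VXLAN: UDP as the outer IP protocol and, when a tunnel key id is
 * given, the VNI in the misc parameters.
 */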
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

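/* Build the outer-header match for a decap flow.  Only VXLAN over IPv4 is
 * handled: an exact UDP dst port that is a known offloaded VXLAN port is
 * required, outer IPv4 addresses are matched when supplied, the DMAC is
 * enforced and IP fragments are left to software.
 */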
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

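/* Translate the flower dissector keys (control, basic, eth, vlan, IPv4/IPv6,
 * ports and the tunnel/enc keys) into the mlx5 match spec, and report the
 * minimal inline mode (L2/IP/TCP_UDP) the resulting match requires.
 */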
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			netdev_warn(priv->netdev,
				    "IPv6 tunnel decap offload isn't supported\n");
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers; outer headers were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}

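/* Wrapper around __parse_cls_flower which, for VF representors in switchdev
 * mode, also rejects matches that need a deeper inline mode than the eswitch
 * is configured for.
 */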
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_eswitch_rep *rep = priv->ppriv;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && esw->mode == SRIOV_OFFLOADS &&
	    rep->vport != FDB_UPLINK_VPORT) {
		if (min_inline > esw->offloads.inline_mode) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

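/* NIC (non-eswitch) actions: a single action per rule is supported, either
 * gact drop (with a counter when the device supports flow counters) or
 * skbedit mark, where the mark is limited to 16 bits and becomes the flow tag.
 */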
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				u32 *action, u32 *flow_tag)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	*action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			*flow_tag = mark;
			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

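/* Encap entries are kept in esw->offloads.encap_tbl, hashed and compared over
 * the raw mlx5_encap_info.  The route lookup below resolves the tunnel
 * destination and its neighbour, falling back to the uplink when the egress
 * device is not on the same eswitch.
 */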
static inline int cmp_encap_info(struct mlx5_encap_info *a,
				 struct mlx5_encap_info *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct mlx5_encap_info *info)
{
	return jhash(info, sizeof(*info), 0);
}

static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   __be32 *saddr,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rtable *rt;
	struct neighbour *n = NULL;
	int ttl;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = rt->dst.dev;

	ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	*saddr = fl4->saddr;
	*out_ttl = ttl;

	return 0;
}

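/* Write a static Ethernet/IPv4/UDP/VXLAN encapsulation header into buf and
 * return its size; the caller passes the resulting blob to the FW.
 */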
static int gen_vxlan_header_ipv4(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 __be32 daddr,
				 __be32 saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}

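/* Resolve the route and neighbour for the tunnel destination, snapshot the
 * neighbour's MAC into the entry, generate the encap header and allocate an
 * encap id in the FW.  Fails if the neighbour is not yet valid.
 */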
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;
	int encap_size;
	__be32 saddr;
	int ttl;
	int err;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = e->tun_info.tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}
	fl4.daddr = e->tun_info.daddr;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
				      &fl4, &n, &saddr, &ttl);
	if (err)
		goto out;

	e->n = n;
	e->out_dev = *out_dev;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
						   e->h_dest, ttl,
						   e->tun_info.daddr,
						   saddr, e->tun_info.tp_dst,
						   e->tun_info.tun_id);
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}

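/* Find or create the encap entry for a tunnel_key set action.  Only VXLAN
 * over IPv4 with a known offloaded UDP dst port (and no fixed src port) is
 * supported; matching entries are shared between flows via encap_tbl.
 */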
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5_encap_info info;
	struct mlx5_encap_entry *e;
	struct net_device *out_dev;
	uintptr_t hash_key;
	bool found = false;
	int tunnel_type;
	int err;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		info.tp_dst = key->tp_dst;
		info.tun_id = tunnel_id_to_key32(key->tun_id);
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	switch (family) {
	case AF_INET:
		info.daddr = key->u.ipv4.dst;
		break;
	case AF_INET6:
		netdev_warn(priv->netdev,
			    "IPv6 tunnel encap offload isn't supported\n");
	default:
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(&info);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info, &info)) {
			found = true;
			break;
		}
	}

	if (found) {
		attr->encap = e;
		return 0;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
	if (err)
		goto out_err;

	attr->encap = e;
	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

	return err;

out_err:
	kfree(e);
	return err;
}

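/* Eswitch (FDB) actions: drop, mirred redirect to a representor on the same
 * eswitch or, combined with tunnel_key set, to a tunnel device (encap),
 * tunnel_key unset (decap) and 802.1Q vlan push/pop.
 */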
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->attr;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = priv->ppriv;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				attr->out_rep = out_priv->ppriv;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, attr);
				if (err)
					return err;
				list_add(&flow->encap, &attr->encap->flows);
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
						MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(attr->encap->out_dev);
				attr->out_rep = out_priv->ppriv;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == VLAN_F_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return 0;
}

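/* TC_CLSFLOWER_REPLACE entry point: parse the match and actions, install the
 * rule in HW (NIC or FDB table depending on the eswitch mode) and index the
 * flow by its TC cookie.  An illustrative filter that exercises this path
 * (device name is just an example):
 *
 *   tc filter add dev enp3s0f0 parent ffff: protocol ip \
 *       flower skip_sw ip_proto tcp dst_port 80 action drop
 */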
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err = 0;
	bool fdb_flow = false;
	u32 flow_tag, action;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (esw && esw->mode == SRIOV_OFFLOADS)
		fdb_flow = true;

	if (fdb_flow)
		flow = kzalloc(sizeof(*flow) +
			       sizeof(struct mlx5_esw_flow_attr),
			       GFP_KERNEL);
	else
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;

	err = parse_cls_flower(priv, spec, f);
	if (err < 0)
		goto err_free;

	if (fdb_flow) {
		flow->attr  = (struct mlx5_esw_flow_attr *)(flow + 1);
		err = parse_tc_fdb_actions(priv, f->exts, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_del_rule;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	goto out;

err_del_rule:
	mlx5e_tc_del_flow(priv, flow);

err_free:
	kfree(flow);
out:
	kvfree(spec);
	return err;
}

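/* TC_CLSFLOWER_DESTROY: look the flow up by cookie, unhash it and tear down
 * its HW state.
 */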
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

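/* TC_CLSFLOWER_STATS: read the cached HW counter of the flow and propagate
 * bytes/packets/lastuse into the filter's actions.
 */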
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	return 0;
}

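/* Offloaded flows are kept in an rhashtable keyed by the TC filter cookie;
 * cleanup walks the table, deleting any flows that are still installed.
 */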
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}