/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	struct mlx5_flow_handle *rule;
	struct mlx5_esw_flow_attr *attr;
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

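/* Offload a flow into the NIC RX flow tables. The TC flow table is
 * created on demand and torn down again if adding the first rule fails.
 * Forwarded (MARK) flows point at the vlan table and carry the flow
 * tag; counted (DROP) flows use a newly allocated flow counter as
 * their destination.
 */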
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_act flow_act = {
		.action = action,
		.flow_tag = flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

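/* Offload a flow into the eswitch FDB. The vlan push/pop resources are
 * set up first; any failure is propagated back as an ERR_PTR.
 */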
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return ERR_PTR(err);

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

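/* Release an offloaded rule together with its flow counter and, in
 * SRIOV offloads mode, its vlan action resources. The TC flow table is
 * destroyed once the last filter is removed.
 */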
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5_flow_handle *rule,
			      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);

	if (esw && esw->mode == SRIOV_OFFLOADS)
		mlx5_eswitch_del_vlan_action(esw, attr);

	mlx5_del_flow_rules(rule);

	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}

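/* Match VXLAN on the outer headers: UDP as the IP protocol plus, when
 * the filter provides one, the VNI taken from the tunnel key id.
 */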
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

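/* Parse the tunnel part of a flower match into the outer headers of the
 * flow spec. Only VXLAN over IPv4 with a fully specified UDP destination
 * port is supported.
 */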
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			return -EOPNOTSUPP;

		/* udp src port isn't supported */
		if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
			return -EOPNOTSUPP;

		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

	} else { /* udp dst port must be given */
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));
	}

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

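/* Translate a flower classifier into a device flow spec. For decap flows
 * the tunnel keys are parsed first into the outer headers, and all
 * remaining keys are then matched against the inner headers.
 */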
static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer headers were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transports are supported\n");
			return -EINVAL;
		}
	}

	return 0;
}

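/* Parse the TC actions of a NIC flow. Exactly one action is accepted:
 * either a gact drop (counted when the device supports flow counters)
 * or a skbedit mark whose value must fit in 16 bits.
 */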
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				u32 *action, u32 *flow_tag)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	*action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bits are supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			*flow_tag = mark;
			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

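/* Parse the TC actions of an eswitch (FDB) flow: drop, mirred redirect
 * to a port on the same switch, 802.1Q vlan push/pop, and tunnel decap.
 */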
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5_esw_flow_attr *attr)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = priv->ppriv;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (!switchdev_port_same_parent_id(priv->netdev, out_dev)) {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			out_priv = netdev_priv(out_dev);
			attr->out_rep = out_priv->ppriv;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == VLAN_F_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return 0;
}

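/* Add or replace a flower filter. The match is parsed into a flow spec,
 * the actions into either eswitch or NIC attributes, and the resulting
 * rule is tracked in a rhashtable keyed by the TC filter cookie. An
 * existing flow with the same cookie is deleted only after its
 * replacement was installed successfully.
 */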
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err = 0;
	bool fdb_flow = false;
	u32 flow_tag, action;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_handle *old = NULL;
	struct mlx5_esw_flow_attr *old_attr = NULL;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (esw && esw->mode == SRIOV_OFFLOADS)
		fdb_flow = true;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (flow) {
		old = flow->rule;
		old_attr = flow->attr;
	} else {
		if (fdb_flow)
			flow = kzalloc(sizeof(*flow) + sizeof(struct mlx5_esw_flow_attr),
				       GFP_KERNEL);
		else
			flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	}

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;

	err = parse_cls_flower(priv, spec, f);
	if (err < 0)
		goto err_free;

	if (fdb_flow) {
		flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
		err = parse_tc_fdb_actions(priv, f->exts, flow->attr);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_free;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	if (old)
		mlx5e_tc_del_flow(priv, old, old_attr);

	goto out;

err_del_rule:
	mlx5_del_flow_rules(flow->rule);

err_free:
	if (!old)
		kfree(flow);
out:
	kvfree(spec);
	return err;
}

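/* Remove the offloaded flow matching the filter cookie, if one exists. */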
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow->rule, flow->attr);

	kfree(flow);

	return 0;
}

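/* Propagate the cached hardware counters of an offloaded flow back into
 * the TC action statistics of the filter.
 */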
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	return 0;
}

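/* Offloaded flows are hashed on the TC filter cookie. */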
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

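/* Initialize the hashtable that tracks offloaded TC flows. */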
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

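/* rhashtable_free_and_destroy() callback: release one leftover flow. */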
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
	kfree(flow);
}

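/* Remove all offloaded flows and, if still present, the TC flow table. */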
void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}