net/mlx5e: Add intermediate struct for TC flow parsing attributes
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c

/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
};

enum {
	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
	MLX5E_TC_FLOW_NIC	= BIT(1),
};

struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule;
	struct list_head	encap; /* flows sharing the same encap */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	struct mlx5_flow_spec spec;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

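/* Offload a flow into the NIC RX flow steering tables: the rule destination
 * is either the vlan flow table (FWD, with the flow tag reported in the CQE)
 * or a freshly allocated flow counter (DROP + COUNT). The tc flow table is
 * created lazily on first use.
 */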
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5_nic_flow_attr *attr)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flow_tag = attr->flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
				   &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

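/* Undo mlx5e_tc_add_nic_flow(): delete the rule and its counter, and destroy
 * the tc flow table once no offloaded filters remain.
 */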
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_fc *counter = NULL;

	if (!IS_ERR(flow->rule)) {
		counter = mlx5_flow_rule_counter(flow->rule);
		mlx5_del_flow_rules(flow->rule);
		mlx5_fc_destroy(priv->mdev, counter);
	}

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}

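/* Offload a flow into the eswitch FDB (switchdev mode): account for any vlan
 * push/pop action first, then install the offloaded rule.
 */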
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return ERR_PTR(err);

	return mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);

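/* Undo mlx5e_tc_add_fdb_flow(): remove the offloaded FDB rule, release the
 * vlan action accounting and, for encap flows, drop the encap reference.
 */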
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);

	mlx5_eswitch_del_vlan_action(esw, flow->esw_attr);

	if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);
}

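/* Unlink a flow from its shared encap entry. When the last flow using the
 * entry goes away, free the HW encap context, release the neighbour and
 * remove the entry from the eswitch encap table.
 */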
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5_encap_entry *e;

		e = list_entry(next, struct mlx5_encap_entry, flows);
		if (e->n) {
			mlx5_encap_dealloc(priv->mdev, e->encap_id);
			neigh_release(e->n);
		}
		hlist_del_rcu(&e->encap_hlist);
		kfree(e);
	}
}

/* We get here also when setting the rule to the FW failed, etc. It means
 * that the flow rule itself might not exist, but some offloading related
 * to the actions should be cleaned.
 */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}

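/* Fill the VXLAN-specific parts of the match: UDP as the IP protocol and,
 * when the filter matches on the tunnel key id, the VNI in misc parameters.
 */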
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

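/* Build the outer-header match for a decap flow. An exact UDP dst port match
 * on a port known to the VXLAN tables is mandatory; outer IPv4/IPv6 addresses
 * are matched when given, and DMAC plus frag=0 matches are always enforced.
 */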
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);
		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
		struct mlx5e_priv *up_priv = netdev_priv(up_dev);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

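/* Translate the flower dissector keys into an mlx5 flow spec (match criteria
 * and values). For tunnel matches the outer headers are set by
 * parse_tunnel_attr() and the remaining keys apply to the inner headers.
 * *min_inline is raised to the minimal eswitch inline mode the match needs:
 * L2 by default, IP for L3 fields, TCP_UDP for L4 ports.
 */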
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer header were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}

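/* Wrapper around __parse_cls_flower(): for eswitch flows added on a (non
 * uplink) vport rep, refuse the offload if the match needs a higher inline
 * mode than the eswitch is configured for.
 */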
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_eswitch_rep *rep = priv->ppriv;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
	    rep->vport != FDB_UPLINK_VPORT) {
		if (min_inline > esw->offloads.inline_mode) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

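/* Parse tc actions for a NIC flow. Exactly one action is accepted per rule:
 * gact drop (plus a counter when the device supports flow counters), or
 * skbedit mark, whose 16-bit mark reaches the CQE through the flow tag.
 *
 * An illustrative command (not taken from this file) that this parser
 * accepts:
 *   tc filter add dev <netdev> parent ffff: protocol ip \
 *           flower ip_proto tcp dst_port 80 action drop
 */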
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5_nic_flow_attr *attr)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	attr->action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (attr->action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			attr->flow_tag = mark;
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

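/* Encap entries are shared between flows and kept in a hash table keyed by
 * the full ip_tunnel_key, so flows encapsulating towards the same tunnel
 * endpoint reuse one HW encap context.
 */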
static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}

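/* Resolve the route and neighbour towards the tunnel destination. When the
 * egress device is not on the same HW eswitch, the uplink netdev is used
 * instead. The IPv6 variant below mirrors this logic.
 */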
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
	ret = dst->error;
	if (ret) {
		dst_release(dst);
		return ret;
	}

	*out_ttl = ip6_dst_hoplimit(dst);

	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

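/* Write a static Ethernet/IPv4/UDP/VXLAN encapsulation header into buf and
 * return its size. Fields left at zero (e.g. IP total length and checksum)
 * are presumably completed by the HW, as the IPv6 variant below notes for
 * the payload length.
 */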
static int gen_vxlan_header_ipv4(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 __be32 daddr,
				 __be32 saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}

static int gen_vxlan_header_ipv6(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 struct in6_addr *daddr,
				 struct in6_addr *saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills up ipv6 payload len */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}

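/* Build the IPv4 encap context for an encap entry: route towards the tunnel
 * destination, require a valid neighbour, snapshot its MAC, generate the
 * VXLAN header and allocate a HW encap_id for it. The IPv6 variant below
 * follows the same steps.
 */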
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	int encap_size, ttl, err;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}
	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto out;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	e->n = n;
	e->out_dev = *out_dev;

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
						   e->h_dest, ttl,
						   fl4.daddr,
						   fl4.saddr, tun_key->tp_dst,
						   tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}

static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)

{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	int encap_size, err, ttl = 0;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto out;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	e->n = n;
	e->out_dev = *out_dev;

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
						   e->h_dest, ttl,
						   &fl6.daddr,
						   &fl6.saddr, tun_key->tp_dst,
						   tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}

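/* Attach a flow to a (possibly shared) encap entry. The tunnel key must set
 * the UDP dst port (an offloaded VXLAN port) and leave the UDP src port
 * unset. An existing entry with the same tunnel key is reused; otherwise a
 * new one is created and its encap header programmed into the HW.
 */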
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5_encap_entry *e;
	struct net_device *out_dev;
	int tunnel_type, err = -EOPNOTSUPP;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	if (found) {
		attr->encap = e;
		return 0;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev);

	if (err)
		goto out_err;

	attr->encap = e;
	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

	return err;

out_err:
	kfree(e);
	return err;
}

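/* Parse tc actions for an eswitch (FDB) flow: gact drop, mirred egress
 * redirect (to a rep on the same eswitch, or through an encap entry towards
 * a tunnel device), 802.1Q vlan push/pop, and tunnel_key set/unset for
 * encap/decap.
 *
 * An illustrative encap rule (not taken from this file) this parser would
 * accept:
 *   tc filter add dev <vf_rep> parent ffff: protocol ip flower \
 *           action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 \
 *                  id 42 dst_port 4789 \
 *           action mirred egress redirect dev <vxlan_dev>
 */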
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = priv->ppriv;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				attr->out_rep = out_priv->ppriv;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, attr);
				if (err)
					return err;
				list_add(&flow->encap, &attr->encap->flows);
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
						MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(attr->encap->out_dev);
				attr->out_rep = out_priv->ppriv;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			} else { /* action is TCA_VLAN_ACT_MODIFY */
				return -EOPNOTSUPP;
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return 0;
}

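/* Entry point for adding a cls_flower filter: allocate the flow with the
 * attribute type matching the current mode (eswitch vs. NIC), parse the
 * match spec and the actions, install the HW rule and index the flow by its
 * tc cookie.
 */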
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	int attr_size, err = 0;
	u8 flow_flags = 0;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags = MLX5E_TC_FLOW_ESWITCH;
		attr_size = sizeof(struct mlx5_esw_flow_attr);
	} else {
		flow_flags = MLX5E_TC_FLOW_NIC;
		attr_size = sizeof(struct mlx5_nic_flow_attr);
	}

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = mlx5_vzalloc(sizeof(*parse_attr));
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;

	err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		err = parse_tc_fdb_actions(priv, f->exts, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow->esw_attr);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, flow->nic_attr);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow->nic_attr);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_del_rule;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	goto out;

err_del_rule:
	mlx5e_tc_del_flow(priv, flow);

err_free:
	kfree(flow);
out:
	kvfree(parse_attr);
	return err;
}

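/* Remove an offloaded cls_flower filter: look the flow up by its cookie,
 * unhash it and tear down its HW state.
 */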
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

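/* Report HW stats for an offloaded filter: read the cached counter values
 * and propagate bytes/packets/lastuse to the tc actions.
 */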
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	preempt_disable();

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	preempt_enable();

	return 0;
}

static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}