/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

enum {
	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
};

struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule;
	struct list_head	encap; /* flows sharing the same encap */
	struct mlx5_esw_flow_attr *attr;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

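/* Offload a flow into the NIC RX path: set up the forward/counter
 * destination, create the TC flow table on first use and add a rule
 * matching on the outer headers.
 */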
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_act flow_act = {
		.action = action,
		.flow_tag = flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

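/* Delete a NIC flow rule and its counter; tear down the TC flow table
 * once the last offloaded filter is removed.
 */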
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_fc *counter = NULL;

	if (!IS_ERR(flow->rule)) {
		counter = mlx5_flow_rule_counter(flow->rule);
		mlx5_del_flow_rules(flow->rule);
		mlx5_fc_destroy(priv->mdev, counter);
	}

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}

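/* Offload a flow into the eswitch FDB: apply the vlan push/pop action
 * first, then add the offloaded rule itself.
 */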
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return ERR_PTR(err);

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);

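/* Undo mlx5e_tc_add_fdb_flow(): delete the offloaded rule, revert the
 * vlan action and detach the encap entry if one was attached.
 */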
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr);

	mlx5_eswitch_del_vlan_action(esw, flow->attr);

	if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);
}

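/* Unlink the flow from its encap entry; once no flow uses the entry,
 * release its HW encap id and neighbour and free it.
 */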
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5_encap_entry *e;

		e = list_entry(next, struct mlx5_encap_entry, flows);
		if (e->n) {
			mlx5_encap_dealloc(priv->mdev, e->encap_id);
			neigh_release(e->n);
		}
		hlist_del_rcu(&e->encap_hlist);
		kfree(e);
	}
}

/* We also get here when setting the rule in the FW failed, etc. In that
 * case the flow rule itself might not exist, but some offloading state
 * related to the actions still needs to be cleaned up.
 */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}

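/* Fill the VXLAN part of the match: outer UDP as the IP protocol and,
 * when present in the filter, the VNI.
 */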
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

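/* Build the outer-header match for a decap flow: UDP ports, tunnel
 * IPv4/IPv6 addresses, DMAC and a no-fragments check. Only VXLAN on an
 * offloaded UDP dport is accepted.
 */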
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

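/* Translate a flower match into an mlx5 flow spec, rejecting dissector
 * keys the HW can't match on, and report the minimal inline mode the
 * match requires (L2, IP or TCP/UDP headers).
 */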
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers; the outer headers were already set by
		 * parse_tunnel_attr.
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}

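/* Parse the match and, for eswitch flows on a VF rep, make sure the
 * required min inline mode doesn't exceed the configured eswitch one.
 */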
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_eswitch_rep *rep = priv->ppriv;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
	    rep->vport != FDB_UPLINK_VPORT) {
		if (min_inline > esw->offloads.inline_mode) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

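/* Parse TC actions for a NIC flow; only gact drop and skbedit mark are
 * supported, one action per rule, with mark becoming the flow tag.
 */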
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				u32 *action, u32 *flow_tag)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	*action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			*flow_tag = mark;
			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

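/* Encap entries are deduplicated by tunnel key: compare and hash the
 * whole struct ip_tunnel_key.
 */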
static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}

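/* Resolve the IPv4 route and neighbour for the tunnel destination; if
 * the egress device isn't on this eswitch, use the uplink instead.
 */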
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

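/* IPv6 counterpart of mlx5e_route_lookup_ipv4() above. */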
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
	ret = dst->error;
	if (ret) {
		dst_release(dst);
		return ret;
	}

	*out_ttl = ip6_dst_hoplimit(dst);

	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

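/* Write a static Ethernet/IPv4/UDP/VXLAN encap header into buf and
 * return its size; fields not set here stay zeroed from the memset.
 */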
static int gen_vxlan_header_ipv4(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 __be32 daddr,
				 __be32 saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}

static int gen_vxlan_header_ipv6(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 struct in6_addr *daddr,
				 struct in6_addr *saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills in the IPv6 payload length */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}

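/* Resolve the route and neighbour for an IPv4 tunnel, build the encap
 * header and allocate the encap id in HW; fails while the neighbour is
 * invalid.
 */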
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	int encap_size, ttl, err;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}
	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto out;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	e->n = n;
	e->out_dev = *out_dev;

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
						   e->h_dest, ttl,
						   fl4.daddr,
						   fl4.saddr, tun_key->tp_dst,
						   tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}

static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	int encap_size, err, ttl = 0;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto out;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	e->n = n;
	e->out_dev = *out_dev;

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
						   e->h_dest, ttl,
						   &fl6.daddr,
						   &fl6.saddr, tun_key->tp_dst,
						   tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}

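/* Find or create the encap entry for this tunnel key. Entries live in
 * the eswitch encap hash table and are shared by flows with the same
 * key; only VXLAN to a known UDP dst port is supported.
 */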
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5_encap_entry *e;
	struct net_device *out_dev;
	int tunnel_type, err = -EOPNOTSUPP;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	if (found) {
		attr->encap = e;
		return 0;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev);

	if (err)
		goto out_err;

	attr->encap = e;
	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

	return err;

out_err:
	kfree(e);
	return err;
}

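/* Parse TC actions for an eswitch (FDB) flow: drop, mirred redirect
 * (to a port on the same eswitch, or through an encap entry after a
 * tunnel set action), tunnel release and 802.1Q vlan push/pop.
 */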
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->attr;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = priv->ppriv;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				attr->out_rep = out_priv->ppriv;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, attr);
				if (err)
					return err;
				list_add(&flow->encap, &attr->encap->flows);
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
						MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(attr->encap->out_dev);
				attr->out_rep = out_priv->ppriv;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			} else { /* action is TCA_VLAN_ACT_MODIFY */
				return -EOPNOTSUPP;
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return 0;
}

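/* Add a flower filter: parse the match and actions, install the rule
 * into the FDB (SRIOV offloads mode) or the NIC TC table, and track
 * the flow in the cookie-keyed hash table.
 */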
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err, attr_size = 0;
	u32 flow_tag, action;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	u8 flow_flags = 0;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags = MLX5E_TC_FLOW_ESWITCH;
		attr_size = sizeof(struct mlx5_esw_flow_attr);
	}

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;

	err = parse_cls_flower(priv, flow, spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
		err = parse_tc_fdb_actions(priv, f->exts, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_del_rule;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	goto out;

err_del_rule:
	mlx5e_tc_del_flow(priv, flow);

err_free:
	kfree(flow);
out:
	kvfree(spec);
	return err;
}

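/* Delete a flower filter: look the flow up by cookie, unhash it and
 * tear down its offloaded state.
 */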
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

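/* Report HW stats for a flower filter: read the cached counter values
 * and propagate them to the filter's actions.
 */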
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	preempt_disable();

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	preempt_enable();

	return 0;
}

static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}