/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/arp.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "lib/devcom.h"

struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	u32 mod_hdr_id;
	u32 hairpin_tirn;
	u8 match_level;
	struct mlx5_flow_table	*hairpin_ft;
	struct mlx5_fc		*counter;
};

#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)

enum {
	MLX5E_TC_FLOW_INGRESS	= MLX5E_TC_INGRESS,
	MLX5E_TC_FLOW_EGRESS	= MLX5E_TC_EGRESS,
	MLX5E_TC_FLOW_ESWITCH	= MLX5E_TC_ESW_OFFLOAD,
	MLX5E_TC_FLOW_NIC	= MLX5E_TC_NIC_OFFLOAD,
	MLX5E_TC_FLOW_OFFLOADED	= BIT(MLX5E_TC_FLOW_BASE),
	MLX5E_TC_FLOW_HAIRPIN	= BIT(MLX5E_TC_FLOW_BASE + 1),
	MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 2),
	MLX5E_TC_FLOW_SLOW	= BIT(MLX5E_TC_FLOW_BASE + 3),
	MLX5E_TC_FLOW_DUP	= BIT(MLX5E_TC_FLOW_BASE + 4),
	MLX5E_TC_FLOW_NOT_READY	= BIT(MLX5E_TC_FLOW_BASE + 5),
};

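/* A flow's flags word combines the exported MLX5E_TC_* bits (ingress/egress
 * and NIC vs. e-switch offload type) with the internal state bits declared
 * above. For example (illustration only), a NIC hairpin flow spread over
 * several channels carries both MLX5E_TC_FLOW_HAIRPIN and
 * MLX5E_TC_FLOW_HAIRPIN_RSS, which mlx5e_tc_add_nic_flow() below tests
 * independently.
 */
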
#define MLX5E_TC_MAX_SPLITS 1

/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 *   |- Helper array
 *      [0] Helper item 0
 *          |- list_head item 0
 *          |- index (0)
 *      [1] Helper item 1
 *          |- list_head item 1
 *          |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    helper item =
 *        container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the
 *    array:
 *    containing struct =
 *        container_of(helper item, containing struct type, helper field[index])
 */
struct encap_flow_item {
	struct list_head list;
	int index;
};

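/* Example of the two-step lookup described above (illustration only; "pos"
 * stands for a list_head taken off an encap entry's flows list, as done in
 * mlx5e_tc_encap_flows_add() below):
 *
 *	struct encap_flow_item *efi;
 *	struct mlx5e_tc_flow *flow;
 *
 *	efi = container_of(pos, struct encap_flow_item, list);
 *	flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
 */
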
struct mlx5e_tc_flow {
	struct rhash_head	node;
	struct mlx5e_priv	*priv;
	u64			cookie;
	u16			flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow	*peer_flow;
	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
	struct list_head	hairpin; /* flows sharing the same hairpin */
	struct list_head	peer;    /* flows with peer flow */
	struct list_head	unready; /* flows not ready to be offloaded (e.g. due to missing route) */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	struct ip_tunnel_info tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev;
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	int max_mod_hdr_actions;
	void *mod_hdr_actions;
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)

struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	u32 tirn;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* flows sharing the same hairpin */
	struct list_head flows;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
};

struct mod_hdr_key {
	int num_actions;
	void *actions;
};

struct mlx5e_mod_hdr_entry {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	/* flows sharing the same mod_hdr entry */
	struct list_head flows;

	struct mod_hdr_key key;

	u32 mod_hdr_id;
};

#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)

static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
	return jhash(key->actions,
		     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
				   struct mod_hdr_key *b)
{
	if (a->num_actions != b->num_actions)
		return 1;

	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}

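/* mod_hdr entries are deduplicated by content: two flows whose parsed
 * header-rewrite actions are byte-identical hash to the same bucket via
 * hash_mod_hdr_info() and compare equal here (cmp_mod_hdr_info() follows
 * memcmp() semantics, returning 0 on a match), so they end up sharing a
 * single firmware mod_hdr_id instead of allocating a second modify header
 * context.
 */
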
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int num_actions, actions_size, namespace, err;
	struct mlx5e_mod_hdr_entry *mh;
	struct mod_hdr_key key;
	bool found = false;
	u32 hash_key;

	num_actions  = parse_attr->num_mod_hdr_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = parse_attr->mod_hdr_actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		namespace = MLX5_FLOW_NAMESPACE_FDB;
		hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	} else {
		namespace = MLX5_FLOW_NAMESPACE_KERNEL;
		hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	}

	if (found)
		goto attach_flow;

	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh)
		return -ENOMEM;

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	INIT_LIST_HEAD(&mh->flows);

	err = mlx5_modify_header_alloc(priv->mdev, namespace,
				       mh->key.num_actions,
				       mh->key.actions,
				       &mh->mod_hdr_id);
	if (err)
		goto out_err;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
	else
		hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);

attach_flow:
	list_add(&flow->mod_hdr, &mh->flows);
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
	else
		flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;

	return 0;

out_err:
	kfree(mh);
	return err;
}

static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->mod_hdr.next;

	list_del(&flow->mod_hdr);

	if (list_empty(next)) {
		struct mlx5e_mod_hdr_entry *mh;

		mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);

		mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
		hash_del(&mh->mod_hdr_hlist);
		kfree(mh);
	}
}

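/* Note on the list_empty(next) idiom above: "next" is sampled before
 * list_del(). If the detached flow was the entry's only remaining user,
 * "next" points at the mlx5e_mod_hdr_entry's own "flows" list head, which
 * is now empty, and list_entry() recovers the entry so it can be released.
 * The hairpin and encap detach paths later in this file use the same
 * pattern.
 */
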
static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}

static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto alloc_tdn_err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
	return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));
		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}

static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

	kvfree(in);
	return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}
	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
	return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}

static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->any_tt_tirn = hp->tirn;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

	ft_attr->max_fte = MLX5E_NUM_TT;
	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}

static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}

static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kvfree(hp);
}

static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}

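/* Worked example: the peer's 16-bit vhca id occupies the upper half of the
 * key and the priority the lower bits, so peer_vhca_id = 5 and prio = 3
 * hash to (5 << 16 | 3) = 0x50003.
 */
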
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
			return hpe;
	}

	return NULL;
}

#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}

static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe)
		goto attach_flow;

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe)
		return -ENOMEM;

	INIT_LIST_HEAD(&hpe->flows);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;

	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set hairpin pair per each 50Gbs share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;

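	/* Worked example (assuming link_speed is reported in Mb/s): a
	 * 100Gb/s port gives link_speed = 100000, so 100000 / 50000 = 2
	 * hairpin channels, while anything at or below 50Gb/s is clamped
	 * up to 50000 and gets a single channel.
	 */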
	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto create_hairpin_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

	hpe->hp = hp;
	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}
	list_add(&flow->hairpin, &hpe->flows);

	return 0;

create_hairpin_err:
	kfree(hpe);
	return err;
}

static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->hairpin.next;

	list_del(&flow->hairpin);

	/* no more hairpin flows for us, release the hairpin pair */
	if (list_empty(next)) {
		struct mlx5e_hairpin_entry *hpe;

		hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);

		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   hpe->hp->pair->peer_mdev->priv.name);

		mlx5e_hairpin_destroy(hpe->hp);
		hash_del(&hpe->hairpin_hlist);
		kfree(hpe);
	}
}

static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flow_tag = attr->flow_tag,
		.reformat_id = 0,
		.flags = FLOW_ACT_HAS_TAG | FLOW_ACT_NO_APPEND,
	};
	struct mlx5_fc *counter = NULL;
	bool table_created = false;
	int err, dest_ix = 0;

	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err) {
			goto err_add_hairpin_flow;
		}
		if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[dest_ix].ft = attr->hairpin_ft;
		} else {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
			dest[dest_ix].tir_num = attr->hairpin_tirn;
		}
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = priv->fs.vlan.ft.t;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter)) {
			err = PTR_ERR(counter);
			goto err_fc_create;
		}
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(counter);
		dest_ix++;
		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err)
			goto err_create_mod_hdr_id;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		int tc_grp_size, tc_tbl_size;
		u32 max_flow_counter;

		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    tc_tbl_size,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    MLX5E_TC_FT_LEVEL, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to create tc offload table\n");
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			err = PTR_ERR(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	if (attr->match_level != MLX5_MATCH_NONE)
		parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
					    &flow_act, dest, dest_ix);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_add_rule;
	}

	return 0;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
	mlx5_fc_destroy(dev, counter);
err_fc_create:
	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
		mlx5e_hairpin_flow_del(priv, flow);
err_add_hairpin_flow:
	return err;
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = attr->counter;
	mlx5_del_flow_rules(flow->rule[0]);
	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD) && priv->fs.tc.t) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
		mlx5e_hairpin_flow_del(priv, flow);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
			      int out_index,
			      struct netlink_ext_ack *extack,
			      struct net_device **encap_dev,
			      bool *encap_valid);

static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_handle *rule;

	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	return rule;
}

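/* When attr->split_count is non-zero, the e-switch offload is split in two:
 * rule[0] is the main offloaded rule and rule[1] an extra forward rule added
 * with mlx5_eswitch_add_fwd_rule(). This is why the rule[] array in struct
 * mlx5e_tc_flow is sized MLX5E_TC_MAX_SPLITS + 1.
 */
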
static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_esw_flow_attr *attr)
{
	flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;

	if (attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}

static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *slow_attr)
{
	struct mlx5_flow_handle *rule;

	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->split_count = 0;
	slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (!IS_ERR(rule))
		flow->flags |= MLX5E_TC_FLOW_SLOW;

	return rule;
}

static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_esw_flow_attr *slow_attr)
{
	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->split_count = 0;
	slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	flow->flags &= ~MLX5E_TC_FLOW_SLOW;
}

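/* A "slow path" rule keeps the flow's match but replaces its actions with a
 * plain forward to FDB_SLOW_PATH_CHAIN and tags the flow MLX5E_TC_FLOW_SLOW.
 * It is used while an encap destination still lacks a resolved neighbour;
 * once mlx5e_tc_encap_flows_add() validates the encap entry, the flow is
 * promoted back to a regular offloaded rule.
 */
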
static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	flow->flags |= MLX5E_TC_FLOW_NOT_READY;
	list_add_tail(&flow->unready, &uplink_priv->unready_flows);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow->flags &= ~MLX5E_TC_FLOW_NOT_READY;
}

static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	u32 max_chain = mlx5_eswitch_get_chain_range(esw);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
	u16 max_prio = mlx5_eswitch_get_prio_range(esw);
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5_fc *counter = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	bool encap_valid = true;
	int err = 0;
	int out_index;

	if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
		NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
		return -EOPNOTSUPP;
	}

	if (attr->chain > max_chain) {
		NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
		err = -EOPNOTSUPP;
		goto err_max_prio_chain;
	}

	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
		err = -EOPNOTSUPP;
		goto err_max_prio_chain;
	}

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		int mirred_ifindex;

		if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     mirred_ifindex);
		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
					 extack, &encap_dev, &encap_valid);
		if (err)
			goto err_attach_encap;

		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		attr->dests[out_index].rep = rpriv->rep;
		attr->dests[out_index].mdev = out_priv->mdev;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		goto err_add_vlan;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		kfree(parse_attr->mod_hdr_actions);
		if (err)
			goto err_mod_hdr;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(attr->counter_dev, true);
		if (IS_ERR(counter)) {
			err = PTR_ERR(counter);
			goto err_create_counter;
		}

		attr->counter = counter;
	}

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (!encap_valid) {
		/* continue with goto slow path rule instead */
		struct mlx5_esw_flow_attr slow_attr;

		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
	} else {
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
	}

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_add_rule;
	}

	return 0;

err_add_rule:
	mlx5_fc_destroy(attr->counter_dev, counter);
err_create_counter:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
	mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
			mlx5e_detach_encap(priv, flow, out_index);
err_attach_encap:
err_max_prio_chain:
	return err;
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5_esw_flow_attr slow_attr;
	int out_index;

	if (flow->flags & MLX5E_TC_FLOW_NOT_READY) {
		remove_unready_flow(flow);
		kvfree(attr->parse_attr);
		return;
	}

	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
		if (flow->flags & MLX5E_TC_FLOW_SLOW)
			mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}

	mlx5_eswitch_del_vlan_action(esw, attr);

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
			mlx5e_detach_encap(priv, flow, out_index);
	kvfree(attr->parse_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(attr->counter_dev, attr->counter);
}

void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr slow_attr, *esw_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct encap_flow_item *efi;
	struct mlx5e_tc_flow *flow;
	int err;

	err = mlx5_packet_reformat_alloc(priv->mdev,
					 e->reformat_type,
					 e->encap_size, e->encap_header,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &e->encap_id);
	if (err) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
			       err);
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(efi, &e->flows, list) {
		bool all_flow_encaps_valid = true;
		int i;

		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
		esw_attr = flow->esw_attr;
		spec = &esw_attr->parse_attr->spec;

		esw_attr->dests[efi->index].encap_id = e->encap_id;
		esw_attr->dests[efi->index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
		/* Flow can be associated with multiple encap entries.
		 * Before offloading the flow verify that all of them have
		 * a valid neighbour.
		 */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
				continue;
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
				all_flow_encaps_valid = false;
				break;
			}
		}
		/* Do not offload flows with unresolved neighbors */
		if (!all_flow_encaps_valid)
			continue;
		/* update from slow path rule to encap rule */
		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when slow path rule removed */
		flow->rule[0] = rule;
	}
}

void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr slow_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct encap_flow_item *efi;
	struct mlx5e_tc_flow *flow;
	int err;

	list_for_each_entry(efi, &e->flows, list) {
		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
		spec = &flow->esw_attr->parse_attr->spec;

		/* update from encap rule to slow path rule */
		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
		/* mark the flow's encap dest as non-valid */
		flow->esw_attr->dests[efi->index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;

		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when fast path rule removed */
		flow->rule[0] = rule;
	}

	/* we know that the encap is valid */
	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
	mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
}

static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		return flow->esw_attr->counter;
	else
		return flow->nic_attr->counter;
}

void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	u64 bytes, packets, lastuse = 0;
	struct mlx5e_tc_flow *flow;
	struct mlx5e_encap_entry *e;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = &nd_tbl;
#endif
	else
		return;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		struct encap_flow_item *efi;
		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
			continue;
		list_for_each_entry(efi, &e->flows, list) {
			flow = container_of(efi, struct mlx5e_tc_flow,
					    encaps[efi->index]);
			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
				counter = mlx5e_tc_get_counter(flow);
				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
		if (neigh_used)
			break;
	}

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n)
			return;

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index)
{
	struct list_head *next = flow->encaps[out_index].list.next;

	list_del(&flow->encaps[out_index].list);
	if (list_empty(next)) {
		struct mlx5e_encap_entry *e;

		e = list_entry(next, struct mlx5e_encap_entry, flows);
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);

		hash_del_rcu(&e->encap_hlist);
		kfree(e->encap_header);
		kfree(e);
	}
}

static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
	    !(flow->flags & MLX5E_TC_FLOW_DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow->flags &= ~MLX5E_TC_FLOW_DUP;

	mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
	kvfree(flow->peer_flow);
	flow->peer_flow = NULL;
}

static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}


static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f,
			     struct net_device *filter_dev, u8 *match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct flow_match_control enc_control;
	int err;

	err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
				 headers_c, headers_v, match_level);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "failed to parse tunnel attributes");
		return err;
	}

	flow_rule_match_enc_control(rule, &enc_control);

	if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(match.mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(match.key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(match.mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(match.key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_enc_ipv6_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
			 match.mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
			 match.key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
			 match.mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
			 match.key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
			 match.mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
			 match.key->ttl);

		if (match.mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB
			(priv->mdev,
			 ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
			return -EOPNOTSUPP;
		}

	}

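	/* Worked example for the tos split above: the tos byte carries DSCP
	 * in its top six bits and ECN in the low two, so tos = 0xb9 matches
	 * as ip_dscp = 0xb9 >> 2 = 0x2e (DSCP 46, EF) and
	 * ip_ecn = 0xb9 & 0x3 = 0x1.
	 */
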
	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      struct net_device *filter_dev,
			      u8 *match_level, u8 *tunnel_match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*match_level = MLX5_MATCH_NONE;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_enc_control(rule, &match);
		switch (match.key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer headers were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match.mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match.key->n_proto));

		if (match.mask->n_proto)
			*match_level = MLX5_MATCH_L2;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 svlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 cvlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
				 match.key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	} else if (*match_level != MLX5_MATCH_NONE) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
		*match_level = MLX5_MATCH_L2;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_svlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
				 match.key->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				match.mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				match.key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				match.mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				match.key->src);

		if (!is_zero_ether_addr(match.mask->src) ||
		    !is_zero_ether_addr(match.mask->dst))
			*match_level = MLX5_MATCH_L2;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;

		/* the HW doesn't support frag first/later */
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
			return -EOPNOTSUPP;

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 match.key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
				*match_level = MLX5_MATCH_L2;
	/* ***  L2 attributes parsing up to here *** */
			else
				*match_level = MLX5_MATCH_L3;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ip_proto = match.key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 match.mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 match.key->ip_proto);

		if (match.mask->ip_proto)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.key->src, sizeof(match.key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.key->dst, sizeof(match.key->dst));

		if (match.mask->src || match.mask->dst)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.key->src, sizeof(match.key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.key->dst, sizeof(match.key->dst));

		if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
			*match_level = MLX5_MATCH_L3;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
			 match.mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
			 match.key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
			 match.mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
			 match.key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
			 match.mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
			 match.key->ttl);

		if (match.mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
						ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
1f97a526 1723 return -EOPNOTSUPP;
e98bedf5 1724 }
a8ade55f 1725
8f256622 1726 if (match.mask->tos || match.mask->ttl)
d708f902 1727 *match_level = MLX5_MATCH_L3;
1f97a526
OG
1728 }
1729
54782900
OG
1730 /* *** L3 attributes parsing up to here *** */
1731
8f256622
PNA
1732 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
1733 struct flow_match_ports match;
1734
1735 flow_rule_match_ports(rule, &match);
e3a2b7ed
AV
1736 switch (ip_proto) {
1737 case IPPROTO_TCP:
1738 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
8f256622 1739 tcp_sport, ntohs(match.mask->src));
e3a2b7ed 1740 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
8f256622 1741 tcp_sport, ntohs(match.key->src));
e3a2b7ed
AV
1742
1743 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
8f256622 1744 tcp_dport, ntohs(match.mask->dst));
e3a2b7ed 1745 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
8f256622 1746 tcp_dport, ntohs(match.key->dst));
e3a2b7ed
AV
1747 break;
1748
1749 case IPPROTO_UDP:
1750 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
8f256622 1751 udp_sport, ntohs(match.mask->src));
e3a2b7ed 1752 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
8f256622 1753 udp_sport, ntohs(match.key->src));
e3a2b7ed
AV
1754
1755 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
8f256622 1756 udp_dport, ntohs(match.mask->dst));
e3a2b7ed 1757 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
8f256622 1758 udp_dport, ntohs(match.key->dst));
e3a2b7ed
AV
1759 break;
1760 default:
e98bedf5
EB
1761 NL_SET_ERR_MSG_MOD(extack,
1762 "Only UDP and TCP transports are supported for L4 matching");
e3a2b7ed
AV
1763 netdev_err(priv->netdev,
1764 "Only UDP and TCP transport are supported\n");
1765 return -EINVAL;
1766 }
de0af0bf 1767
8f256622 1768 if (match.mask->src || match.mask->dst)
d708f902 1769 *match_level = MLX5_MATCH_L4;
e3a2b7ed
AV
1770 }
1771
8f256622
PNA
1772 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
1773 struct flow_match_tcp match;
e77834ec 1774
8f256622 1775 flow_rule_match_tcp(rule, &match);
e77834ec 1776 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
8f256622 1777 ntohs(match.mask->flags));
e77834ec 1778 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8f256622 1779 ntohs(match.key->flags));
e77834ec 1780
8f256622 1781 if (match.mask->flags)
d708f902 1782 *match_level = MLX5_MATCH_L4;
e77834ec
OG
1783 }
1784
e3a2b7ed
AV
1785 return 0;
1786}
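/* Illustrative recap (not part of the driver): each matcher above programs a
 * mask/value pair - headers_c holds the mask, headers_v the value. Matching
 * TCP dport 80, for instance, boils down to:
 *
 *	MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport, 0xffff);
 *	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport, 80);
 *
 * while *match_level records the deepest layer the masks touch (L2/L3/L4)
 * for the inline-mode validation done by the caller below.
 */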
1787
de0af0bf 1788static int parse_cls_flower(struct mlx5e_priv *priv,
65ba8fb7 1789 struct mlx5e_tc_flow *flow,
de0af0bf 1790 struct mlx5_flow_spec *spec,
54c177ca
OS
1791 struct tc_cls_flower_offload *f,
1792 struct net_device *filter_dev)
de0af0bf 1793{
e98bedf5 1794 struct netlink_ext_ack *extack = f->common.extack;
de0af0bf
RD
1795 struct mlx5_core_dev *dev = priv->mdev;
1796 struct mlx5_eswitch *esw = dev->priv.eswitch;
1d447a39 1797 struct mlx5e_rep_priv *rpriv = priv->ppriv;
6363651d 1798 u8 match_level, tunnel_match_level = MLX5_MATCH_NONE;
1d447a39 1799 struct mlx5_eswitch_rep *rep;
de0af0bf
RD
1800 int err;
1801
6363651d 1802 err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level);
de0af0bf 1803
1d447a39
SM
1804 if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
1805 rep = rpriv->rep;
b05af6aa 1806 if (rep->vport != MLX5_VPORT_UPLINK &&
1d447a39 1807 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
d708f902 1808 esw->offloads.inline_mode < match_level)) {
e98bedf5
EB
1809 NL_SET_ERR_MSG_MOD(extack,
1810 "Flow is not offloaded due to min inline setting");
de0af0bf
RD
1811 netdev_warn(priv->netdev,
1812 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
d708f902 1813 match_level, esw->offloads.inline_mode);
de0af0bf
RD
1814 return -EOPNOTSUPP;
1815 }
1816 }
1817
6363651d 1818 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
38aa51c1 1819 flow->esw_attr->match_level = match_level;
6363651d
OG
1820 flow->esw_attr->tunnel_match_level = tunnel_match_level;
1821 } else {
38aa51c1 1822 flow->nic_attr->match_level = match_level;
6363651d 1823 }
38aa51c1 1824
de0af0bf
RD
1825 return err;
1826}
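/* Worked example for the min inline check above (illustrative): a VF rep
 * flow matching on TCP ports needs match_level == MLX5_MATCH_L4; if the
 * eswitch is configured with inline_mode == MLX5_INLINE_MODE_L2, then
 * inline_mode < match_level holds and the flow stays in software - the
 * inline-mode and match-level enums are defined on one scale precisely so
 * this comparison works.
 */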
1827
d79b6df6
OG
1828struct pedit_headers {
1829 struct ethhdr eth;
0eb69bb9 1830 struct vlan_hdr vlan;
d79b6df6
OG
1831 struct iphdr ip4;
1832 struct ipv6hdr ip6;
1833 struct tcphdr tcp;
1834 struct udphdr udp;
1835};
1836
c500c86b
PNA
1837struct pedit_headers_action {
1838 struct pedit_headers vals;
1839 struct pedit_headers masks;
1840 u32 pedits;
1841};
1842
d79b6df6 1843static int pedit_header_offsets[] = {
73867881
PNA
1844 [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
1845 [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
1846 [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
1847 [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
1848 [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
d79b6df6
OG
1849};
1850
1851#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
1852
1853static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
c500c86b 1854 struct pedit_headers_action *hdrs)
d79b6df6
OG
1855{
1856 u32 *curr_pmask, *curr_pval;
1857
c500c86b
PNA
1858 curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
1859 curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
d79b6df6
OG
1860
1861 if (*curr_pmask & mask) /* disallow acting twice on the same location */
1862 goto out_err;
1863
1864 *curr_pmask |= mask;
1865 *curr_pval |= (val & mask);
1866
1867 return 0;
1868
1869out_err:
1870 return -EOPNOTSUPP;
1871}
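/* Usage sketch (hypothetical values): two pedit keys touching the same bits
 * of the same header are refused, e.g.
 *
 *	set_pedit_val(FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0xffff0000, v1, 0, hdrs);
 *	set_pedit_val(FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0x00ff0000, v2, 0, hdrs);
 *
 * The second call finds the overlapping bits already set in curr_pmask and
 * returns -EOPNOTSUPP rather than silently merging the two writes.
 */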
1872
1873struct mlx5_fields {
1874 u8 field;
1875 u8 size;
1876 u32 offset;
1877};
1878
a8e4f0c4
OG
1879#define OFFLOAD(fw_field, size, field, off) \
1880 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}
1881
d79b6df6 1882static struct mlx5_fields fields[] = {
a8e4f0c4
OG
1883 OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
1884 OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0),
1885 OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
1886 OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0),
1887 OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0),
0eb69bb9 1888 OFFLOAD(FIRST_VID, 2, vlan.h_vlan_TCI, 0),
a8e4f0c4
OG
1889
1890 OFFLOAD(IP_TTL, 1, ip4.ttl, 0),
1891 OFFLOAD(SIPV4, 4, ip4.saddr, 0),
1892 OFFLOAD(DIPV4, 4, ip4.daddr, 0),
1893
1894 OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
1895 OFFLOAD(SIPV6_95_64, 4, ip6.saddr.s6_addr32[1], 0),
1896 OFFLOAD(SIPV6_63_32, 4, ip6.saddr.s6_addr32[2], 0),
1897 OFFLOAD(SIPV6_31_0, 4, ip6.saddr.s6_addr32[3], 0),
1898 OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
1899 OFFLOAD(DIPV6_95_64, 4, ip6.daddr.s6_addr32[1], 0),
1900 OFFLOAD(DIPV6_63_32, 4, ip6.daddr.s6_addr32[2], 0),
1901 OFFLOAD(DIPV6_31_0, 4, ip6.daddr.s6_addr32[3], 0),
0c0316f5 1902 OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),
a8e4f0c4
OG
1903
1904 OFFLOAD(TCP_SPORT, 2, tcp.source, 0),
1905 OFFLOAD(TCP_DPORT, 2, tcp.dest, 0),
1906 OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),
1907
1908 OFFLOAD(UDP_SPORT, 2, udp.source, 0),
1909 OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
d79b6df6
OG
1910};
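/* Note on the table above: each OFFLOAD() entry maps a FW-rewritable field
 * to its byte offset inside struct pedit_headers. The one non-obvious entry
 * is OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5): ack_seq starts at byte 8 of the
 * TCP header, so 8 + 5 = 13 is the flags byte - struct tcphdr exposes the
 * flags only as bitfields, leaving no member to name directly.
 */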
1911
218d05ce
TZ
1912/* On input, parse_attr->max_mod_hdr_actions tells how many HW actions can
1913 * be parsed at most from the SW pedit actions. On success,
1914 * parse_attr->num_mod_hdr_actions says how many HW actions were actually parsed.
d79b6df6 1915 */
c500c86b 1916static int offload_pedit_fields(struct pedit_headers_action *hdrs,
e98bedf5
EB
1917 struct mlx5e_tc_flow_parse_attr *parse_attr,
1918 struct netlink_ext_ack *extack)
d79b6df6
OG
1919{
1920 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
2b64beba 1921 int i, action_size, nactions, max_actions, first, last, next_z;
d79b6df6 1922 void *s_masks_p, *a_masks_p, *vals_p;
d79b6df6
OG
1923 struct mlx5_fields *f;
1924 u8 cmd, field_bsize;
e3ca4e05 1925 u32 s_mask, a_mask;
d79b6df6 1926 unsigned long mask;
2b64beba
OG
1927 __be32 mask_be32;
1928 __be16 mask_be16;
d79b6df6
OG
1929 void *action;
1930
73867881
PNA
1931 set_masks = &hdrs[0].masks;
1932 add_masks = &hdrs[1].masks;
1933 set_vals = &hdrs[0].vals;
1934 add_vals = &hdrs[1].vals;
d79b6df6
OG
1935
1936 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
218d05ce
TZ
1937 action = parse_attr->mod_hdr_actions +
1938 parse_attr->num_mod_hdr_actions * action_size;
1939
1940 max_actions = parse_attr->max_mod_hdr_actions;
1941 nactions = parse_attr->num_mod_hdr_actions;
d79b6df6
OG
1942
1943 for (i = 0; i < ARRAY_SIZE(fields); i++) {
1944 f = &fields[i];
1945 /* avoid seeing bits set from previous iterations */
e3ca4e05
OG
1946 s_mask = 0;
1947 a_mask = 0;
d79b6df6
OG
1948
1949 s_masks_p = (void *)set_masks + f->offset;
1950 a_masks_p = (void *)add_masks + f->offset;
1951
1952 memcpy(&s_mask, s_masks_p, f->size);
1953 memcpy(&a_mask, a_masks_p, f->size);
1954
1955 if (!s_mask && !a_mask) /* nothing to offload here */
1956 continue;
1957
1958 if (s_mask && a_mask) {
e98bedf5
EB
1959 NL_SET_ERR_MSG_MOD(extack,
1960 "can't set and add to the same HW field");
d79b6df6
OG
1961 printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
1962 return -EOPNOTSUPP;
1963 }
1964
1965 if (nactions == max_actions) {
e98bedf5
EB
1966 NL_SET_ERR_MSG_MOD(extack,
1967 "too many pedit actions, can't offload");
d79b6df6
OG
1968 printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
1969 return -EOPNOTSUPP;
1970 }
1971
1972 if (s_mask) {
1973 cmd = MLX5_ACTION_TYPE_SET;
1974 mask = s_mask;
1975 vals_p = (void *)set_vals + f->offset;
1976 /* clear to denote we consumed this field */
1977 memset(s_masks_p, 0, f->size);
1978 } else {
1979 cmd = MLX5_ACTION_TYPE_ADD;
1980 mask = a_mask;
1981 vals_p = (void *)add_vals + f->offset;
1982 /* clear to denote we consumed this field */
1983 memset(a_masks_p, 0, f->size);
1984 }
1985
d79b6df6 1986 field_bsize = f->size * BITS_PER_BYTE;
e3ca4e05 1987
2b64beba
OG
1988 if (field_bsize == 32) {
1989 mask_be32 = *(__be32 *)&mask;
1990 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
1991 } else if (field_bsize == 16) {
1992 mask_be16 = *(__be16 *)&mask;
1993 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
1994 }
1995
d79b6df6 1996 first = find_first_bit(&mask, field_bsize);
2b64beba 1997 next_z = find_next_zero_bit(&mask, field_bsize, first);
d79b6df6 1998 last = find_last_bit(&mask, field_bsize);
2b64beba 1999 if (first < next_z && next_z < last) {
e98bedf5
EB
2000 NL_SET_ERR_MSG_MOD(extack,
2001 "rewrite of few sub-fields isn't supported");
2b64beba 2002 printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
d79b6df6
OG
2003 mask);
2004 return -EOPNOTSUPP;
2005 }
2006
2007 MLX5_SET(set_action_in, action, action_type, cmd);
2008 MLX5_SET(set_action_in, action, field, f->field);
2009
2010 if (cmd == MLX5_ACTION_TYPE_SET) {
2b64beba 2011 MLX5_SET(set_action_in, action, offset, first);
d79b6df6 2012 /* length is num of bits to be written, zero means length of 32 */
2b64beba 2013 MLX5_SET(set_action_in, action, length, (last - first + 1));
d79b6df6
OG
2014 }
2015
2016 if (field_bsize == 32)
2b64beba 2017 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
d79b6df6 2018 else if (field_bsize == 16)
2b64beba 2019 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
d79b6df6 2020 else if (field_bsize == 8)
2b64beba 2021 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
d79b6df6
OG
2022
2023 action += action_size;
2024 nactions++;
2025 }
2026
2027 parse_attr->num_mod_hdr_actions = nactions;
2028 return 0;
2029}
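/* Worked example for the contiguity test above (illustrative): after the
 * endianness fixup, mask 0x0ff0 yields first = 4, next_z = 12, last = 11,
 * so the "first < next_z && next_z < last" rejection does not trigger and a
 * single SET action with offset 4, length 8 is emitted. A split mask such
 * as 0x0f0f yields first = 0, next_z = 4, last = 11 and is rejected: one HW
 * action can rewrite exactly one contiguous run of bits in a field.
 */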
2030
2031static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
c500c86b
PNA
2032 struct pedit_headers_action *hdrs,
2033 int namespace,
d79b6df6
OG
2034 struct mlx5e_tc_flow_parse_attr *parse_attr)
2035{
2036 int nkeys, action_size, max_actions;
2037
c500c86b
PNA
2038 nkeys = hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits +
2039 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits;
d79b6df6
OG
2040 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
2041
2042 if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
2043 max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
2044 else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
2045 max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);
2046
2047 /* a single 32-bit pedit SW key can expand to as many as 16 HW actions */
2048 max_actions = min(max_actions, nkeys * 16);
2049
2050 parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
2051 if (!parse_attr->mod_hdr_actions)
2052 return -ENOMEM;
2053
218d05ce 2054 parse_attr->max_mod_hdr_actions = max_actions;
d79b6df6
OG
2055 return 0;
2056}
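/* Sizing example (illustrative): a pedit with 3 SET keys and 1 ADD key has
 * nkeys = 4, so room for min(4 * 16, device max_modify_header_actions) HW
 * actions is allocated up front; offload_pedit_fields() then fails cleanly
 * with -EOPNOTSUPP if the parsed actions outgrow that bound.
 */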
2057
2058static const struct pedit_headers zero_masks = {};
2059
2060static int parse_tc_pedit_action(struct mlx5e_priv *priv,
73867881 2061 const struct flow_action_entry *act, int namespace,
e98bedf5 2062 struct mlx5e_tc_flow_parse_attr *parse_attr,
c500c86b 2063 struct pedit_headers_action *hdrs,
e98bedf5 2064 struct netlink_ext_ack *extack)
d79b6df6 2065{
73867881
PNA
2066 u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
2067 int err = -EOPNOTSUPP;
d79b6df6 2068 u32 mask, val, offset;
73867881 2069 u8 htype;
d79b6df6 2070
73867881
PNA
2071 htype = act->mangle.htype;
2072 err = -EOPNOTSUPP; /* can't be all optimistic */
d79b6df6 2073
73867881
PNA
2074 if (htype == FLOW_ACT_MANGLE_UNSPEC) {
2075 NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
2076 goto out_err;
2077 }
d79b6df6 2078
73867881
PNA
2079 mask = act->mangle.mask;
2080 val = act->mangle.val;
2081 offset = act->mangle.offset;
d79b6df6 2082
73867881
PNA
2083 err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
2084 if (err)
2085 goto out_err;
c500c86b 2086
73867881 2087 hdrs[cmd].pedits++;
d79b6df6 2088
c500c86b
PNA
2089 return 0;
2090out_err:
2091 return err;
2092}
2093
2094static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
2095 struct mlx5e_tc_flow_parse_attr *parse_attr,
2096 struct pedit_headers_action *hdrs,
2097 struct netlink_ext_ack *extack)
2098{
2099 struct pedit_headers *cmd_masks;
2100 int err;
2101 u8 cmd;
2102
218d05ce 2103 if (!parse_attr->mod_hdr_actions) {
a655fe9f 2104 err = alloc_mod_hdr_actions(priv, hdrs, namespace, parse_attr);
218d05ce
TZ
2105 if (err)
2106 goto out_err;
2107 }
d79b6df6 2108
c500c86b 2109 err = offload_pedit_fields(hdrs, parse_attr, extack);
d79b6df6
OG
2110 if (err < 0)
2111 goto out_dealloc_parsed_actions;
2112
2113 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
c500c86b 2114 cmd_masks = &hdrs[cmd].masks;
d79b6df6 2115 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
e98bedf5
EB
2116 NL_SET_ERR_MSG_MOD(extack,
2117 "attempt to offload an unsupported field");
b3a433de 2118 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
d79b6df6
OG
2119 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
2120 16, 1, cmd_masks, sizeof(zero_masks), true);
2121 err = -EOPNOTSUPP;
2122 goto out_dealloc_parsed_actions;
2123 }
2124 }
2125
2126 return 0;
2127
2128out_dealloc_parsed_actions:
2129 kfree(parse_attr->mod_hdr_actions);
2130out_err:
2131 return err;
2132}
2133
e98bedf5
EB
2134static bool csum_offload_supported(struct mlx5e_priv *priv,
2135 u32 action,
2136 u32 update_flags,
2137 struct netlink_ext_ack *extack)
26c02749
OG
2138{
2139 u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
2140 TCA_CSUM_UPDATE_FLAG_UDP;
2141
2142 /* The HW recalcs checksums only if re-writing headers */
2143 if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
e98bedf5
EB
2144 NL_SET_ERR_MSG_MOD(extack,
2145 "TC csum action is only offloaded with pedit");
26c02749
OG
2146 netdev_warn(priv->netdev,
2147 "TC csum action is only offloaded with pedit\n");
2148 return false;
2149 }
2150
2151 if (update_flags & ~prot_flags) {
e98bedf5
EB
2152 NL_SET_ERR_MSG_MOD(extack,
2153 "can't offload TC csum action for some header/s");
26c02749
OG
2154 netdev_warn(priv->netdev,
2155 "can't offload TC csum action for some header/s - flags %#x\n",
2156 update_flags);
2157 return false;
2158 }
2159
2160 return true;
2161}
2162
bdd66ac0 2163static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
73867881 2164 struct flow_action *flow_action,
1651925d 2165 u32 actions,
e98bedf5 2166 struct netlink_ext_ack *extack)
bdd66ac0 2167{
73867881 2168 const struct flow_action_entry *act;
bdd66ac0 2169 bool modify_ip_header;
bdd66ac0
OG
2170 u8 htype, ip_proto;
2171 void *headers_v;
2172 u16 ethertype;
73867881 2173 int i;
bdd66ac0 2174
1651925d
GS
2175 if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
2176 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
2177 else
2178 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
2179
bdd66ac0
OG
2180 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
2181
2182 /* for non-IP we only re-write MACs, so we're okay */
2183 if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
2184 goto out_ok;
2185
2186 modify_ip_header = false;
73867881
PNA
2187 flow_action_for_each(i, act, flow_action) {
2188 if (act->id != FLOW_ACTION_MANGLE &&
2189 act->id != FLOW_ACTION_ADD)
bdd66ac0
OG
2190 continue;
2191
73867881
PNA
2192 htype = act->mangle.htype;
2193 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
2194 htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
2195 modify_ip_header = true;
2196 break;
bdd66ac0
OG
2197 }
2198 }
2199
2200 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
1ccef350
JL
2201 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
2202 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
e98bedf5
EB
2203 NL_SET_ERR_MSG_MOD(extack,
2204 "can't offload re-write of non TCP/UDP");
bdd66ac0
OG
2205 pr_info("can't offload re-write of ip proto %d\n", ip_proto);
2206 return false;
2207 }
2208
2209out_ok:
2210 return true;
2211}
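/* Example of the check above (illustrative): an IPv4 flow with ip_proto GRE
 * plus a pedit rewriting ip4.saddr sets modify_ip_header and is rejected,
 * since the HW can only fix up L4 checksums for TCP/UDP. ICMP is exempt
 * because its checksum does not cover an IP pseudo-header, so an IP address
 * rewrite cannot invalidate it.
 */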
2212
2213static bool actions_match_supported(struct mlx5e_priv *priv,
73867881 2214 struct flow_action *flow_action,
bdd66ac0 2215 struct mlx5e_tc_flow_parse_attr *parse_attr,
e98bedf5
EB
2216 struct mlx5e_tc_flow *flow,
2217 struct netlink_ext_ack *extack)
bdd66ac0
OG
2218{
2219 u32 actions;
2220
2221 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
2222 actions = flow->esw_attr->action;
2223 else
2224 actions = flow->nic_attr->action;
2225
7e29392e
RD
2226 if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
2227 !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
2228 return false;
2229
bdd66ac0 2230 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
73867881 2231 return modify_header_match_supported(&parse_attr->spec,
a655fe9f 2232 flow_action, actions,
e98bedf5 2233 extack);
bdd66ac0
OG
2234
2235 return true;
2236}
2237
5c65c564
OG
2238static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
2239{
2240 struct mlx5_core_dev *fmdev, *pmdev;
816f6706 2241 u64 fsystem_guid, psystem_guid;
5c65c564
OG
2242
2243 fmdev = priv->mdev;
2244 pmdev = peer_priv->mdev;
2245
59c9d35e
AH
2246 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
2247 psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
5c65c564 2248
816f6706 2249 return (fsystem_guid == psystem_guid);
5c65c564
OG
2250}
2251
bdc837ee
EB
2252static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
2253 const struct flow_action_entry *act,
2254 struct mlx5e_tc_flow_parse_attr *parse_attr,
2255 struct pedit_headers_action *hdrs,
2256 u32 *action, struct netlink_ext_ack *extack)
2257{
2258 u16 mask16 = VLAN_VID_MASK;
2259 u16 val16 = act->vlan.vid & VLAN_VID_MASK;
2260 const struct flow_action_entry pedit_act = {
2261 .id = FLOW_ACTION_MANGLE,
2262 .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
2263 .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
2264 .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
2265 .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
2266 };
2267 int err;
2268
2269 if (act->vlan.prio) {
2270 NL_SET_ERR_MSG_MOD(extack, "Setting VLAN prio is not supported");
2271 return -EOPNOTSUPP;
2272 }
2273
2274 err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr,
2275 hdrs, NULL);
2276 *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2277
2278 return err;
2279}
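/* Worked example (illustrative): "vlan modify id 100" is lowered to a
 * 16-bit mangle of h_vlan_TCI with val16 = 100 and mask16 = VLAN_VID_MASK
 * (0x0fff), so only the 12 VID bits are rewritten and PCP/DEI survive.
 * Requests that also touch the priority bits are refused above, as
 * FIRST_VID is the only TCI rewrite wired up in the fields[] table.
 */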
2280
73867881
PNA
2281static int parse_tc_nic_actions(struct mlx5e_priv *priv,
2282 struct flow_action *flow_action,
aa0cbbae 2283 struct mlx5e_tc_flow_parse_attr *parse_attr,
e98bedf5
EB
2284 struct mlx5e_tc_flow *flow,
2285 struct netlink_ext_ack *extack)
e3a2b7ed 2286{
aa0cbbae 2287 struct mlx5_nic_flow_attr *attr = flow->nic_attr;
73867881
PNA
2288 struct pedit_headers_action hdrs[2] = {};
2289 const struct flow_action_entry *act;
1cab1cd7 2290 u32 action = 0;
244cd96a 2291 int err, i;
e3a2b7ed 2292
73867881 2293 if (!flow_action_has_entries(flow_action))
e3a2b7ed
AV
2294 return -EINVAL;
2295
3bc4b7bf 2296 attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
e3a2b7ed 2297
73867881
PNA
2298 flow_action_for_each(i, act, flow_action) {
2299 switch (act->id) {
2300 case FLOW_ACTION_DROP:
1cab1cd7 2301 action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
aad7e08d
AV
2302 if (MLX5_CAP_FLOWTABLE(priv->mdev,
2303 flow_table_properties_nic_receive.flow_counter))
1cab1cd7 2304 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
73867881
PNA
2305 break;
2306 case FLOW_ACTION_MANGLE:
2307 case FLOW_ACTION_ADD:
2308 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
c500c86b 2309 parse_attr, hdrs, extack);
2f4fe4ca
OG
2310 if (err)
2311 return err;
2312
1cab1cd7
OG
2313 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
2314 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
73867881 2315 break;
bdc837ee
EB
2316 case FLOW_ACTION_VLAN_MANGLE:
2317 err = add_vlan_rewrite_action(priv,
2318 MLX5_FLOW_NAMESPACE_KERNEL,
2319 act, parse_attr, hdrs,
2320 &action, extack);
2321 if (err)
2322 return err;
2323
2324 break;
73867881 2325 case FLOW_ACTION_CSUM:
1cab1cd7 2326 if (csum_offload_supported(priv, action,
73867881 2327 act->csum_flags,
e98bedf5 2328 extack))
73867881 2329 break;
26c02749
OG
2330
2331 return -EOPNOTSUPP;
73867881
PNA
2332 case FLOW_ACTION_REDIRECT: {
2333 struct net_device *peer_dev = act->dev;
5c65c564
OG
2334
2335 if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
2336 same_hw_devs(priv, netdev_priv(peer_dev))) {
98b66cb1 2337 parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
5c65c564 2338 flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
1cab1cd7
OG
2339 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2340 MLX5_FLOW_CONTEXT_ACTION_COUNT;
5c65c564 2341 } else {
e98bedf5
EB
2342 NL_SET_ERR_MSG_MOD(extack,
2343 "device is not on same HW, can't offload");
5c65c564
OG
2344 netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
2345 peer_dev->name);
2346 return -EINVAL;
2347 }
73867881
PNA
2348 }
2349 break;
2350 case FLOW_ACTION_MARK: {
2351 u32 mark = act->mark;
e3a2b7ed
AV
2352
2353 if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
e98bedf5
EB
2354 NL_SET_ERR_MSG_MOD(extack,
2355 "Bad flow mark - only 16 bit is supported");
e3a2b7ed
AV
2356 return -EINVAL;
2357 }
2358
3bc4b7bf 2359 attr->flow_tag = mark;
1cab1cd7 2360 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
73867881
PNA
2361 }
2362 break;
2363 default:
2364 return -EINVAL;
e3a2b7ed 2365 }
e3a2b7ed
AV
2366 }
2367
c500c86b
PNA
2368 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
2369 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
2370 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
2371 parse_attr, hdrs, extack);
2372 if (err)
2373 return err;
2374 }
2375
1cab1cd7 2376 attr->action = action;
73867881 2377 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
bdd66ac0
OG
2378 return -EOPNOTSUPP;
2379
e3a2b7ed
AV
2380 return 0;
2381}
2382
76f7444d
OG
2383static inline int cmp_encap_info(struct ip_tunnel_key *a,
2384 struct ip_tunnel_key *b)
a54e20b4
HHZ
2385{
2386 return memcmp(a, b, sizeof(*a));
2387}
2388
76f7444d 2389static inline int hash_encap_info(struct ip_tunnel_key *key)
a54e20b4 2390{
76f7444d 2391 return jhash(key, sizeof(*key), 0);
a54e20b4
HHZ
2392}
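/* Encap entries are deduplicated on the full ip_tunnel_key: hash_encap_info()
 * buckets entries in esw->offloads.encap_tbl by jhash of the key and
 * cmp_encap_info() memcmps the candidates, so flows towards the same tunnel
 * endpoint share a single cached encap entry (see mlx5e_attach_encap() below).
 */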
2393
a54e20b4 2394
b1d90e6b
RL
2395static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
2396 struct net_device *peer_netdev)
2397{
2398 struct mlx5e_priv *peer_priv;
2399
2400 peer_priv = netdev_priv(peer_netdev);
2401
2402 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
68931c7d
RD
2403 mlx5e_eswitch_rep(priv->netdev) &&
2404 mlx5e_eswitch_rep(peer_netdev) &&
2405 same_hw_devs(priv, peer_priv));
b1d90e6b
RL
2406}
2407
32f3671f 2408
f5bc2c5d 2409
a54e20b4 2410static int mlx5e_attach_encap(struct mlx5e_priv *priv,
e98bedf5 2411 struct mlx5e_tc_flow *flow,
733d4f36
RD
2412 struct net_device *mirred_dev,
2413 int out_index,
8c4dc42b 2414 struct netlink_ext_ack *extack,
0ad060ee
RD
2415 struct net_device **encap_dev,
2416 bool *encap_valid)
a54e20b4
HHZ
2417{
2418 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
45247bf2 2419 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
733d4f36
RD
2420 struct mlx5e_tc_flow_parse_attr *parse_attr;
2421 struct ip_tunnel_info *tun_info;
2422 struct ip_tunnel_key *key;
c1ae1152 2423 struct mlx5e_encap_entry *e;
733d4f36 2424 unsigned short family;
a54e20b4
HHZ
2425 uintptr_t hash_key;
2426 bool found = false;
54c177ca 2427 int err = 0;
a54e20b4 2428
733d4f36
RD
2429 parse_attr = attr->parse_attr;
2430 tun_info = &parse_attr->tun_info[out_index];
2431 family = ip_tunnel_info_af(tun_info);
2432 key = &tun_info->key;
2433
76f7444d 2434 hash_key = hash_encap_info(key);
a54e20b4
HHZ
2435
2436 hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
2437 encap_hlist, hash_key) {
76f7444d 2438 if (!cmp_encap_info(&e->tun_info.key, key)) {
a54e20b4
HHZ
2439 found = true;
2440 break;
2441 }
2442 }
2443
b2812089 2444 /* must verify if encap is valid or not */
45247bf2
OG
2445 if (found)
2446 goto attach_flow;
a54e20b4
HHZ
2447
2448 e = kzalloc(sizeof(*e), GFP_KERNEL);
2449 if (!e)
2450 return -ENOMEM;
2451
76f7444d 2452 e->tun_info = *tun_info;
101f4de9 2453 err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
54c177ca
OS
2454 if (err)
2455 goto out_err;
2456
a54e20b4
HHZ
2457 INIT_LIST_HEAD(&e->flows);
2458
ce99f6b9 2459 if (family == AF_INET)
101f4de9 2460 err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
ce99f6b9 2461 else if (family == AF_INET6)
101f4de9 2462 err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
ce99f6b9 2463
0ad060ee 2464 if (err)
a54e20b4
HHZ
2465 goto out_err;
2466
a54e20b4
HHZ
2467 hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
2468
45247bf2 2469attach_flow:
8c4dc42b
EB
2470 list_add(&flow->encaps[out_index].list, &e->flows);
2471 flow->encaps[out_index].index = out_index;
45247bf2 2472 *encap_dev = e->out_dev;
8c4dc42b
EB
2473 if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
2474 attr->dests[out_index].encap_id = e->encap_id;
2475 attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
0ad060ee 2476 *encap_valid = true;
8c4dc42b 2477 } else {
0ad060ee 2478 *encap_valid = false;
8c4dc42b 2479 }
45247bf2 2480
232c0013 2481 return err;
a54e20b4
HHZ
2482
2483out_err:
2484 kfree(e);
2485 return err;
2486}
2487
1482bd3d 2488static int parse_tc_vlan_action(struct mlx5e_priv *priv,
73867881 2489 const struct flow_action_entry *act,
1482bd3d
JL
2490 struct mlx5_esw_flow_attr *attr,
2491 u32 *action)
2492{
cc495188
JL
2493 u8 vlan_idx = attr->total_vlan;
2494
2495 if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
2496 return -EOPNOTSUPP;
2497
73867881
PNA
2498 switch (act->id) {
2499 case FLOW_ACTION_VLAN_POP:
cc495188
JL
2500 if (vlan_idx) {
2501 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
2502 MLX5_FS_VLAN_DEPTH))
2503 return -EOPNOTSUPP;
2504
2505 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
2506 } else {
2507 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
2508 }
73867881
PNA
2509 break;
2510 case FLOW_ACTION_VLAN_PUSH:
2511 attr->vlan_vid[vlan_idx] = act->vlan.vid;
2512 attr->vlan_prio[vlan_idx] = act->vlan.prio;
2513 attr->vlan_proto[vlan_idx] = act->vlan.proto;
cc495188
JL
2514 if (!attr->vlan_proto[vlan_idx])
2515 attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
2516
2517 if (vlan_idx) {
2518 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
2519 MLX5_FS_VLAN_DEPTH))
2520 return -EOPNOTSUPP;
2521
2522 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
2523 } else {
2524 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
73867881
PNA
2525 (act->vlan.proto != htons(ETH_P_8021Q) ||
2526 act->vlan.prio))
cc495188
JL
2527 return -EOPNOTSUPP;
2528
2529 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
1482bd3d 2530 }
73867881
PNA
2531 break;
2532 default:
bdc837ee 2533 return -EINVAL;
1482bd3d
JL
2534 }
2535
cc495188
JL
2536 attr->total_vlan = vlan_idx + 1;
2537
1482bd3d
JL
2538 return 0;
2539}
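/* Example (illustrative): a QinQ rule such as
 *
 *	tc filter add dev $REP ... action vlan push id 10 pipe \
 *		action vlan push id 20 ...
 *
 * walks through this parser twice: the first push sets VLAN_PUSH, the
 * second sets VLAN_PUSH_2 and is accepted only if the device supports
 * MLX5_FS_VLAN_DEPTH (i.e. two) stacked VLAN actions.
 */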
2540
73867881
PNA
2541static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
2542 struct flow_action *flow_action,
d7e75a32 2543 struct mlx5e_tc_flow_parse_attr *parse_attr,
e98bedf5
EB
2544 struct mlx5e_tc_flow *flow,
2545 struct netlink_ext_ack *extack)
03a9d11e 2546{
73867881 2547 struct pedit_headers_action hdrs[2] = {};
bf07aa73 2548 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
ecf5bb79 2549 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
1d447a39 2550 struct mlx5e_rep_priv *rpriv = priv->ppriv;
73867881
PNA
2551 const struct ip_tunnel_info *info = NULL;
2552 const struct flow_action_entry *act;
a54e20b4 2553 bool encap = false;
1cab1cd7 2554 u32 action = 0;
244cd96a 2555 int err, i;
03a9d11e 2556
73867881 2557 if (!flow_action_has_entries(flow_action))
03a9d11e
OG
2558 return -EINVAL;
2559
1d447a39 2560 attr->in_rep = rpriv->rep;
10ff5359 2561 attr->in_mdev = priv->mdev;
03a9d11e 2562
73867881
PNA
2563 flow_action_for_each(i, act, flow_action) {
2564 switch (act->id) {
2565 case FLOW_ACTION_DROP:
1cab1cd7
OG
2566 action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
2567 MLX5_FLOW_CONTEXT_ACTION_COUNT;
73867881
PNA
2568 break;
2569 case FLOW_ACTION_MANGLE:
2570 case FLOW_ACTION_ADD:
2571 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
c500c86b 2572 parse_attr, hdrs, extack);
d7e75a32
OG
2573 if (err)
2574 return err;
2575
1cab1cd7 2576 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
e85e02ba 2577 attr->split_count = attr->out_count;
73867881
PNA
2578 break;
2579 case FLOW_ACTION_CSUM:
1cab1cd7 2580 if (csum_offload_supported(priv, action,
73867881
PNA
2581 act->csum_flags, extack))
2582 break;
26c02749
OG
2583
2584 return -EOPNOTSUPP;
73867881
PNA
2585 case FLOW_ACTION_REDIRECT:
2586 case FLOW_ACTION_MIRRED: {
03a9d11e 2587 struct mlx5e_priv *out_priv;
592d3651 2588 struct net_device *out_dev;
03a9d11e 2589
73867881 2590 out_dev = act->dev;
ef381359
OS
2591 if (!out_dev) {
2592 /* out_dev is NULL when filters with a
2593 * non-existent mirred device are replayed to
2594 * the driver.
2595 */
2596 return -EINVAL;
2597 }
03a9d11e 2598
592d3651 2599 if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
e98bedf5
EB
2600 NL_SET_ERR_MSG_MOD(extack,
2601 "can't support more output ports, can't offload forwarding");
592d3651
CM
2602 pr_err("can't support more than %d output ports, can't offload forwarding\n",
2603 attr->out_count);
2604 return -EOPNOTSUPP;
2605 }
2606
f493f155
EB
2607 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2608 MLX5_FLOW_CONTEXT_ACTION_COUNT;
6dcfa234
FF
2609 if (netdev_port_same_parent_id(priv->netdev,
2610 out_dev) ||
b1d90e6b 2611 is_merged_eswitch_dev(priv, out_dev)) {
7ba58ba7
RL
2612 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2613 struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
2614 struct net_device *uplink_upper = netdev_master_upper_dev_get(uplink_dev);
2615
2616 if (uplink_upper &&
2617 netif_is_lag_master(uplink_upper) &&
2618 uplink_upper == out_dev)
2619 out_dev = uplink_dev;
2620
a0646c88
EB
2621 if (!mlx5e_eswitch_rep(out_dev))
2622 return -EOPNOTSUPP;
2623
a54e20b4 2624 out_priv = netdev_priv(out_dev);
1d447a39 2625 rpriv = out_priv->ppriv;
df65a573
EB
2626 attr->dests[attr->out_count].rep = rpriv->rep;
2627 attr->dests[attr->out_count].mdev = out_priv->mdev;
2628 attr->out_count++;
a54e20b4 2629 } else if (encap) {
8c4dc42b
EB
2630 parse_attr->mirred_ifindex[attr->out_count] =
2631 out_dev->ifindex;
2632 parse_attr->tun_info[attr->out_count] = *info;
2633 encap = false;
3c37745e 2634 attr->parse_attr = parse_attr;
f493f155
EB
2635 attr->dests[attr->out_count].flags |=
2636 MLX5_ESW_DEST_ENCAP;
1cc26d74 2637 attr->out_count++;
df65a573
EB
2638 /* attr->dests[].rep is resolved when we
2639 * handle encap
2640 */
ef381359
OS
2641 } else if (parse_attr->filter_dev != priv->netdev) {
2642 /* All mlx5 devices are called to configure
2643 * high level device filters. Therefore, the
2644 * *attempt* to install a filter on invalid
2645 * eswitch should not trigger an explicit error
2646 */
2647 return -EINVAL;
a54e20b4 2648 } else {
e98bedf5
EB
2649 NL_SET_ERR_MSG_MOD(extack,
2650 "devices are not on same switch HW, can't offload forwarding");
03a9d11e
OG
2651 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
2652 priv->netdev->name, out_dev->name);
2653 return -EINVAL;
2654 }
73867881
PNA
2655 }
2656 break;
2657 case FLOW_ACTION_TUNNEL_ENCAP:
2658 info = act->tunnel;
a54e20b4
HHZ
2659 if (info)
2660 encap = true;
2661 else
2662 return -EOPNOTSUPP;
1482bd3d 2663
73867881
PNA
2664 break;
2665 case FLOW_ACTION_VLAN_PUSH:
2666 case FLOW_ACTION_VLAN_POP:
2667 err = parse_tc_vlan_action(priv, act, attr, &action);
1482bd3d
JL
2668 if (err)
2669 return err;
2670
bdc837ee
EB
2671 attr->split_count = attr->out_count;
2672 break;
2673 case FLOW_ACTION_VLAN_MANGLE:
2674 err = add_vlan_rewrite_action(priv,
2675 MLX5_FLOW_NAMESPACE_FDB,
2676 act, parse_attr, hdrs,
2677 &action, extack);
2678 if (err)
2679 return err;
2680
e85e02ba 2681 attr->split_count = attr->out_count;
73867881
PNA
2682 break;
2683 case FLOW_ACTION_TUNNEL_DECAP:
1cab1cd7 2684 action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
73867881
PNA
2685 break;
2686 case FLOW_ACTION_GOTO: {
2687 u32 dest_chain = act->chain_index;
bf07aa73
PB
2688 u32 max_chain = mlx5_eswitch_get_chain_range(esw);
2689
2690 if (dest_chain <= attr->chain) {
2691 NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
2692 return -EOPNOTSUPP;
2693 }
2694 if (dest_chain > max_chain) {
2695 NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
2696 return -EOPNOTSUPP;
2697 }
e88afe75 2698 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
bf07aa73 2699 attr->dest_chain = dest_chain;
73867881
PNA
2700 break;
2701 }
2702 default:
2703 return -EINVAL;
bf07aa73 2704 }
03a9d11e 2705 }
bdd66ac0 2706
c500c86b
PNA
2707 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
2708 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
2709 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
2710 parse_attr, hdrs, extack);
2711 if (err)
2712 return err;
2713 }
2714
1cab1cd7 2715 attr->action = action;
73867881 2716 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
bdd66ac0
OG
2717 return -EOPNOTSUPP;
2718
e88afe75
OG
2719 if (attr->dest_chain) {
2720 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
2721 NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
2722 return -EOPNOTSUPP;
2723 }
2724 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2725 }
2726
e85e02ba 2727 if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
e98bedf5
EB
2728 NL_SET_ERR_MSG_MOD(extack,
2729 "current firmware doesn't support split rule for port mirroring");
592d3651
CM
2730 netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
2731 return -EOPNOTSUPP;
2732 }
2733
31c8eba5 2734 return 0;
03a9d11e
OG
2735}
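/* Note on split_count (illustrative): header-rewrite and VLAN actions above
 * snapshot attr->split_count = attr->out_count, marking how many outputs
 * precede the packet-modifying action; mirroring to those outputs must
 * happen before the rewrite, which is the split-rule capability probed via
 * mlx5_esw_has_fwd_fdb() at the end of this function.
 */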
2736
5dbe906f 2737static void get_flags(int flags, u16 *flow_flags)
60bd4af8 2738{
5dbe906f 2739 u16 __flow_flags = 0;
60bd4af8
OG
2740
2741 if (flags & MLX5E_TC_INGRESS)
2742 __flow_flags |= MLX5E_TC_FLOW_INGRESS;
2743 if (flags & MLX5E_TC_EGRESS)
2744 __flow_flags |= MLX5E_TC_FLOW_EGRESS;
2745
d9ee0491
OG
2746 if (flags & MLX5E_TC_ESW_OFFLOAD)
2747 __flow_flags |= MLX5E_TC_FLOW_ESWITCH;
2748 if (flags & MLX5E_TC_NIC_OFFLOAD)
2749 __flow_flags |= MLX5E_TC_FLOW_NIC;
2750
60bd4af8
OG
2751 *flow_flags = __flow_flags;
2752}
2753
05866c82
OG
2754static const struct rhashtable_params tc_ht_params = {
2755 .head_offset = offsetof(struct mlx5e_tc_flow, node),
2756 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
2757 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
2758 .automatic_shrinking = true,
2759};
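/* Flows are indexed by the unmodified TC cookie: tc_ht_params keys the
 * rhashtable on mlx5e_tc_flow.cookie, which is why configure/delete/stats
 * below all start from rhashtable_lookup_fast(tc_ht, &f->cookie, ...).
 */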
2760
d9ee0491 2761static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, int flags)
05866c82 2762{
655dc3d2
OG
2763 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2764 struct mlx5e_rep_priv *uplink_rpriv;
2765
d9ee0491 2766 if (flags & MLX5E_TC_ESW_OFFLOAD) {
655dc3d2 2767 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
ec1366c2 2768 return &uplink_rpriv->uplink_priv.tc_ht;
d9ee0491 2769 } else /* NIC offload */
655dc3d2 2770 return &priv->fs.tc.ht;
05866c82
OG
2771}
2772
04de7dda
RD
2773static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
2774{
1418ddd9 2775 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
b05af6aa 2776 bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
1418ddd9
AH
2777 flow->flags & MLX5E_TC_FLOW_INGRESS;
2778 bool act_is_encap = !!(attr->action &
2779 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
2780 bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
2781 MLX5_DEVCOM_ESW_OFFLOADS);
2782
10fbb1cd
RD
2783 if (!esw_paired)
2784 return false;
2785
2786 if ((mlx5_lag_is_sriov(attr->in_mdev) ||
2787 mlx5_lag_is_multipath(attr->in_mdev)) &&
2788 (is_rep_ingress || act_is_encap))
2789 return true;
2790
2791 return false;
04de7dda
RD
2792}
2793
a88780a9
RD
2794static int
2795mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
5dbe906f 2796 struct tc_cls_flower_offload *f, u16 flow_flags,
a88780a9
RD
2797 struct mlx5e_tc_flow_parse_attr **__parse_attr,
2798 struct mlx5e_tc_flow **__flow)
e3a2b7ed 2799{
17091853 2800 struct mlx5e_tc_flow_parse_attr *parse_attr;
3bc4b7bf 2801 struct mlx5e_tc_flow *flow;
a88780a9 2802 int err;
e3a2b7ed 2803
65ba8fb7 2804 flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
1b9a07ee 2805 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
17091853 2806 if (!parse_attr || !flow) {
e3a2b7ed
AV
2807 err = -ENOMEM;
2808 goto err_free;
2809 }
2810
2811 flow->cookie = f->cookie;
65ba8fb7 2812 flow->flags = flow_flags;
655dc3d2 2813 flow->priv = priv;
e3a2b7ed 2814
a88780a9
RD
2815 *__flow = flow;
2816 *__parse_attr = parse_attr;
2817
2818 return 0;
2819
2820err_free:
2821 kfree(flow);
2822 kvfree(parse_attr);
2823 return err;
2824}
2825
988ab9c7
TZ
2826static void
2827mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
2828 struct mlx5e_priv *priv,
2829 struct mlx5e_tc_flow_parse_attr *parse_attr,
2830 struct tc_cls_flower_offload *f,
2831 struct mlx5_eswitch_rep *in_rep,
2832 struct mlx5_core_dev *in_mdev)
2833{
2834 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2835
2836 esw_attr->parse_attr = parse_attr;
2837 esw_attr->chain = f->common.chain_index;
2838 esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
2839
2840 esw_attr->in_rep = in_rep;
2841 esw_attr->in_mdev = in_mdev;
2842
2843 if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
2844 MLX5_COUNTER_SOURCE_ESWITCH)
2845 esw_attr->counter_dev = in_mdev;
2846 else
2847 esw_attr->counter_dev = priv->mdev;
2848}
2849
71129676 2850static struct mlx5e_tc_flow *
04de7dda
RD
2851__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
2852 struct tc_cls_flower_offload *f,
2853 u16 flow_flags,
2854 struct net_device *filter_dev,
2855 struct mlx5_eswitch_rep *in_rep,
71129676 2856 struct mlx5_core_dev *in_mdev)
a88780a9 2857{
73867881 2858 struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
a88780a9
RD
2859 struct netlink_ext_ack *extack = f->common.extack;
2860 struct mlx5e_tc_flow_parse_attr *parse_attr;
2861 struct mlx5e_tc_flow *flow;
2862 int attr_size, err;
e3a2b7ed 2863
a88780a9
RD
2864 flow_flags |= MLX5E_TC_FLOW_ESWITCH;
2865 attr_size = sizeof(struct mlx5_esw_flow_attr);
2866 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
2867 &parse_attr, &flow);
2868 if (err)
2869 goto out;
988ab9c7 2870
d11afc26 2871 parse_attr->filter_dev = filter_dev;
988ab9c7
TZ
2872 mlx5e_flow_esw_attr_init(flow->esw_attr,
2873 priv, parse_attr,
2874 f, in_rep, in_mdev);
2875
54c177ca
OS
2876 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
2877 f, filter_dev);
d11afc26
OS
2878 if (err)
2879 goto err_free;
a88780a9 2880
73867881 2881 err = parse_tc_fdb_actions(priv, &rule->action, parse_attr, flow, extack);
a88780a9
RD
2882 if (err)
2883 goto err_free;
2884
7040632d 2885 err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
ef06c9ee
RD
2886 if (err) {
2887 if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
2888 goto err_free;
2889
b4a23329 2890 add_unready_flow(flow);
ef06c9ee 2891 }
e3a2b7ed 2892
71129676 2893 return flow;
a88780a9
RD
2894
2895err_free:
2896 kfree(flow);
2897 kvfree(parse_attr);
2898out:
71129676 2899 return ERR_PTR(err);
a88780a9
RD
2900}
2901
04de7dda 2902static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f,
95dc1902
RD
2903 struct mlx5e_tc_flow *flow,
2904 u16 flow_flags)
04de7dda
RD
2905{
2906 struct mlx5e_priv *priv = flow->priv, *peer_priv;
2907 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
2908 struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
2909 struct mlx5e_tc_flow_parse_attr *parse_attr;
2910 struct mlx5e_rep_priv *peer_urpriv;
2911 struct mlx5e_tc_flow *peer_flow;
2912 struct mlx5_core_dev *in_mdev;
2913 int err = 0;
2914
2915 peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
2916 if (!peer_esw)
2917 return -ENODEV;
2918
2919 peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
2920 peer_priv = netdev_priv(peer_urpriv->netdev);
2921
2922 /* in_mdev is assigned to the mdev the packet originated from.
2923 * So packets redirected to the uplink use the same mdev as the
2924 * original flow, and packets redirected from the uplink use the
2925 * peer mdev.
2926 */
b05af6aa 2927 if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
04de7dda
RD
2928 in_mdev = peer_priv->mdev;
2929 else
2930 in_mdev = priv->mdev;
2931
2932 parse_attr = flow->esw_attr->parse_attr;
95dc1902 2933 peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
71129676
JG
2934 parse_attr->filter_dev,
2935 flow->esw_attr->in_rep, in_mdev);
2936 if (IS_ERR(peer_flow)) {
2937 err = PTR_ERR(peer_flow);
04de7dda 2938 goto out;
71129676 2939 }
04de7dda
RD
2940
2941 flow->peer_flow = peer_flow;
2942 flow->flags |= MLX5E_TC_FLOW_DUP;
2943 mutex_lock(&esw->offloads.peer_mutex);
2944 list_add_tail(&flow->peer, &esw->offloads.peer_flows);
2945 mutex_unlock(&esw->offloads.peer_mutex);
2946
2947out:
2948 mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
2949 return err;
2950}
2951
2952static int
2953mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
2954 struct tc_cls_flower_offload *f,
2955 u16 flow_flags,
2956 struct net_device *filter_dev,
2957 struct mlx5e_tc_flow **__flow)
2958{
2959 struct mlx5e_rep_priv *rpriv = priv->ppriv;
2960 struct mlx5_eswitch_rep *in_rep = rpriv->rep;
2961 struct mlx5_core_dev *in_mdev = priv->mdev;
2962 struct mlx5e_tc_flow *flow;
2963 int err;
2964
71129676
JG
2965 flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
2966 in_mdev);
2967 if (IS_ERR(flow))
2968 return PTR_ERR(flow);
04de7dda
RD
2969
2970 if (is_peer_flow_needed(flow)) {
95dc1902 2971 err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
04de7dda
RD
2972 if (err) {
2973 mlx5e_tc_del_fdb_flow(priv, flow);
2974 goto out;
2975 }
2976 }
2977
2978 *__flow = flow;
2979
2980 return 0;
2981
2982out:
2983 return err;
2984}
2985
a88780a9
RD
2986static int
2987mlx5e_add_nic_flow(struct mlx5e_priv *priv,
2988 struct tc_cls_flower_offload *f,
5dbe906f 2989 u16 flow_flags,
d11afc26 2990 struct net_device *filter_dev,
a88780a9
RD
2991 struct mlx5e_tc_flow **__flow)
2992{
73867881 2993 struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
a88780a9
RD
2994 struct netlink_ext_ack *extack = f->common.extack;
2995 struct mlx5e_tc_flow_parse_attr *parse_attr;
2996 struct mlx5e_tc_flow *flow;
2997 int attr_size, err;
2998
bf07aa73
PB
2999 /* multi-chain not supported for NIC rules */
3000 if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
3001 return -EOPNOTSUPP;
3002
a88780a9
RD
3003 flow_flags |= MLX5E_TC_FLOW_NIC;
3004 attr_size = sizeof(struct mlx5_nic_flow_attr);
3005 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
3006 &parse_attr, &flow);
3007 if (err)
3008 goto out;
3009
d11afc26 3010 parse_attr->filter_dev = filter_dev;
54c177ca
OS
3011 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
3012 f, filter_dev);
d11afc26
OS
3013 if (err)
3014 goto err_free;
3015
73867881 3016 err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
a88780a9
RD
3017 if (err)
3018 goto err_free;
3019
3020 err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
3021 if (err)
3022 goto err_free;
3023
3024 flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
3025 kvfree(parse_attr);
3026 *__flow = flow;
3027
3028 return 0;
e3a2b7ed 3029
e3a2b7ed 3030err_free:
a88780a9 3031 kfree(flow);
17091853 3032 kvfree(parse_attr);
a88780a9
RD
3033out:
3034 return err;
3035}
3036
3037static int
3038mlx5e_tc_add_flow(struct mlx5e_priv *priv,
3039 struct tc_cls_flower_offload *f,
3040 int flags,
d11afc26 3041 struct net_device *filter_dev,
a88780a9
RD
3042 struct mlx5e_tc_flow **flow)
3043{
3044 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
5dbe906f 3045 u16 flow_flags;
a88780a9
RD
3046 int err;
3047
3048 get_flags(flags, &flow_flags);
3049
bf07aa73
PB
3050 if (!tc_can_offload_extack(priv->netdev, f->common.extack))
3051 return -EOPNOTSUPP;
3052
a88780a9 3053 if (esw && esw->mode == SRIOV_OFFLOADS)
d11afc26
OS
3054 err = mlx5e_add_fdb_flow(priv, f, flow_flags,
3055 filter_dev, flow);
a88780a9 3056 else
d11afc26
OS
3057 err = mlx5e_add_nic_flow(priv, f, flow_flags,
3058 filter_dev, flow);
a88780a9
RD
3059
3060 return err;
3061}
3062
71d82d2a 3063int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
a88780a9
RD
3064 struct tc_cls_flower_offload *f, int flags)
3065{
3066 struct netlink_ext_ack *extack = f->common.extack;
d9ee0491 3067 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
a88780a9
RD
3068 struct mlx5e_tc_flow *flow;
3069 int err = 0;
3070
3071 flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
3072 if (flow) {
3073 NL_SET_ERR_MSG_MOD(extack,
3074 "flow cookie already exists, ignoring");
3075 netdev_warn_once(priv->netdev,
3076 "flow cookie %lx already exists, ignoring\n",
3077 f->cookie);
3078 goto out;
3079 }
3080
d11afc26 3081 err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
a88780a9
RD
3082 if (err)
3083 goto out;
3084
3085 err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
3086 if (err)
3087 goto err_free;
3088
3089 return 0;
3090
3091err_free:
3092 mlx5e_tc_del_flow(priv, flow);
232c0013 3093 kfree(flow);
a88780a9 3094out:
e3a2b7ed
AV
3095 return err;
3096}
3097
8f8ae895
OG
3098#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS)
3099#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS)
3100
3101static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
3102{
3103 if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK))
3104 return true;
3105
3106 return false;
3107}
3108
71d82d2a 3109int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
60bd4af8 3110 struct tc_cls_flower_offload *f, int flags)
e3a2b7ed 3111{
d9ee0491 3112 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
e3a2b7ed 3113 struct mlx5e_tc_flow *flow;
e3a2b7ed 3114
05866c82 3115 flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
8f8ae895 3116 if (!flow || !same_flow_direction(flow, flags))
e3a2b7ed
AV
3117 return -EINVAL;
3118
05866c82 3119 rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
e3a2b7ed 3120
961e8979 3121 mlx5e_tc_del_flow(priv, flow);
e3a2b7ed
AV
3122
3123 kfree(flow);
3124
3125 return 0;
3126}
3127
71d82d2a 3128int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
60bd4af8 3129 struct tc_cls_flower_offload *f, int flags)
aad7e08d 3130{
04de7dda 3131 struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
d9ee0491 3132 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
04de7dda 3133 struct mlx5_eswitch *peer_esw;
aad7e08d 3134 struct mlx5e_tc_flow *flow;
aad7e08d 3135 struct mlx5_fc *counter;
316d5f72
RD
3136 u64 lastuse = 0;
3137 u64 packets = 0;
3138 u64 bytes = 0;
aad7e08d 3139
05866c82 3140 flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
8f8ae895 3141 if (!flow || !same_flow_direction(flow, flags))
aad7e08d
AV
3142 return -EINVAL;
3143
316d5f72
RD
3144 if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
3145 counter = mlx5e_tc_get_counter(flow);
3146 if (!counter)
3147 return 0;
aad7e08d 3148
316d5f72
RD
3149 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
3150 }
aad7e08d 3151
316d5f72
RD
3152 /* Under multipath it's possible for one rule to be currently
3153 * un-offloaded while the other rule is offloaded.
3154 */
04de7dda
RD
3155 peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
3156 if (!peer_esw)
3157 goto out;
3158
3159 if ((flow->flags & MLX5E_TC_FLOW_DUP) &&
3160 (flow->peer_flow->flags & MLX5E_TC_FLOW_OFFLOADED)) {
3161 u64 bytes2;
3162 u64 packets2;
3163 u64 lastuse2;
3164
3165 counter = mlx5e_tc_get_counter(flow->peer_flow);
316d5f72
RD
3166 if (!counter)
3167 goto no_peer_counter;
04de7dda
RD
3168 mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
3169
3170 bytes += bytes2;
3171 packets += packets2;
3172 lastuse = max_t(u64, lastuse, lastuse2);
3173 }
3174
316d5f72 3175no_peer_counter:
04de7dda 3176 mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
04de7dda 3177out:
3b1903ef 3178 flow_stats_update(&f->stats, bytes, packets, lastuse);
fed06ee8 3179
aad7e08d
AV
3180 return 0;
3181}
3182
4d8fcf21
AH
3183static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
3184 struct mlx5e_priv *peer_priv)
3185{
3186 struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
3187 struct mlx5e_hairpin_entry *hpe;
3188 u16 peer_vhca_id;
3189 int bkt;
3190
3191 if (!same_hw_devs(priv, peer_priv))
3192 return;
3193
3194 peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
3195
3196 hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
3197 if (hpe->peer_vhca_id == peer_vhca_id)
3198 hpe->hp->pair->peer_gone = true;
3199 }
3200}
3201
3202static int mlx5e_tc_netdev_event(struct notifier_block *this,
3203 unsigned long event, void *ptr)
3204{
3205 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
3206 struct mlx5e_flow_steering *fs;
3207 struct mlx5e_priv *peer_priv;
3208 struct mlx5e_tc_table *tc;
3209 struct mlx5e_priv *priv;
3210
3211 if (ndev->netdev_ops != &mlx5e_netdev_ops ||
3212 event != NETDEV_UNREGISTER ||
3213 ndev->reg_state == NETREG_REGISTERED)
3214 return NOTIFY_DONE;
3215
3216 tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
3217 fs = container_of(tc, struct mlx5e_flow_steering, tc);
3218 priv = container_of(fs, struct mlx5e_priv, fs);
3219 peer_priv = netdev_priv(ndev);
3220 if (priv == peer_priv ||
3221 !(priv->netdev->features & NETIF_F_HW_TC))
3222 return NOTIFY_DONE;
3223
3224 mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
3225
3226 return NOTIFY_DONE;
3227}
3228
655dc3d2 3229int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
e8f887ac 3230{
acff797c 3231 struct mlx5e_tc_table *tc = &priv->fs.tc;
4d8fcf21 3232 int err;
e8f887ac 3233
11c9c548 3234 hash_init(tc->mod_hdr_tbl);
5c65c564 3235 hash_init(tc->hairpin_tbl);
11c9c548 3236
4d8fcf21
AH
3237 err = rhashtable_init(&tc->ht, &tc_ht_params);
3238 if (err)
3239 return err;
3240
3241 tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
3242 if (register_netdevice_notifier(&tc->netdevice_nb)) {
3243 tc->netdevice_nb.notifier_call = NULL;
3244 mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
3245 }
3246
3247 return err;
e8f887ac
AV
3248}
3249
3250static void _mlx5e_tc_del_flow(void *ptr, void *arg)
3251{
3252 struct mlx5e_tc_flow *flow = ptr;
655dc3d2 3253 struct mlx5e_priv *priv = flow->priv;
e8f887ac 3254
961e8979 3255 mlx5e_tc_del_flow(priv, flow);
e8f887ac
AV
3256 kfree(flow);
3257}
3258
655dc3d2 3259void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
e8f887ac 3260{
acff797c 3261 struct mlx5e_tc_table *tc = &priv->fs.tc;
e8f887ac 3262
4d8fcf21
AH
3263 if (tc->netdevice_nb.notifier_call)
3264 unregister_netdevice_notifier(&tc->netdevice_nb);
3265
d9ee0491 3266 rhashtable_destroy(&tc->ht);
e8f887ac 3267
acff797c
MG
3268 if (!IS_ERR_OR_NULL(tc->t)) {
3269 mlx5_destroy_flow_table(tc->t);
3270 tc->t = NULL;
e8f887ac
AV
3271 }
3272}
655dc3d2
OG
3273
3274int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
3275{
3276 return rhashtable_init(tc_ht, &tc_ht_params);
3277}
3278
3279void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
3280{
3281 rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
3282}
01252a27 3283
d9ee0491 3284int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags)
01252a27 3285{
d9ee0491 3286 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
01252a27
OG
3287
3288 return atomic_read(&tc_ht->nelems);
3289}
04de7dda
RD
3290
3291void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
3292{
3293 struct mlx5e_tc_flow *flow, *tmp;
3294
3295 list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
3296 __mlx5e_tc_del_fdb_peer_flow(flow);
3297}
b4a23329
RD
3298
3299void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
3300{
3301 struct mlx5_rep_uplink_priv *rpriv =
3302 container_of(work, struct mlx5_rep_uplink_priv,
3303 reoffload_flows_work);
3304 struct mlx5e_tc_flow *flow, *tmp;
3305
3306 rtnl_lock();
3307 list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
3308 if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
3309 remove_unready_flow(flow);
3310 }
3311 rtnl_unlock();
3312}