/* drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c */
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

/* Per vport tables */

#define MLX5_ESW_VPORT_TABLE_SIZE 128

/* This struct is used as a key to the hash table and we need it to be
 * packed so the hash result is consistent.
 */
struct mlx5_vport_key {
	u32 chain;
	u16 prio;
	u16 vport;
	u16 vhca_id;
} __packed;

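/* Editorial note (illustrative, not part of the upstream file): __packed
 * matters because both jhash() in flow_attr_to_vport_key() and the memcmp()
 * in esw_vport_tbl_lookup() operate on the raw bytes of the struct. The key
 * is filled field by field on the stack, so without __packed the
 * compiler-inserted padding bytes would hold indeterminate data and two
 * logically equal keys could hash or compare as different.
 */
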
struct mlx5_vport_tbl_attr {
	u16 chain;
	u16 prio;
	u16 vport;
};

struct mlx5_vport_table {
	struct hlist_node hlist;
	struct mlx5_flow_table *fdb;
	u32 num_rules;
	struct mlx5_vport_key key;
};

#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4

static struct mlx5_flow_table *
esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *fdb;

	ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
	ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
	ft_attr.prio = FDB_PER_VPORT;
	fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
			 PTR_ERR(fdb));
	}

	return fdb;
}

static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
				  struct mlx5_vport_tbl_attr *attr,
				  struct mlx5_vport_key *key)
{
	key->vport = attr->vport;
	key->chain = attr->chain;
	key->prio = attr->prio;
	key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	return jhash(key, sizeof(*key), 0);
}

/* caller must hold vports.lock */
static struct mlx5_vport_table *
esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
{
	struct mlx5_vport_table *e;

	hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
		if (!memcmp(&e->key, skey, sizeof(*skey)))
			return e;

	return NULL;
}

static void
esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
{
	struct mlx5_vport_table *e;
	struct mlx5_vport_key key;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &key);
	e = esw_vport_tbl_lookup(esw, &key, hkey);
	if (!e || --e->num_rules)
		goto out;

	hash_del(&e->hlist);
	mlx5_destroy_flow_table(e->fdb);
	kfree(e);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
}

static struct mlx5_flow_table *
esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport_table *e;
	struct mlx5_vport_key skey;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &skey);
	e = esw_vport_tbl_lookup(esw, &skey, hkey);
	if (e) {
		e->num_rules++;
		goto out;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		fdb = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB namespace\n");
		fdb = ERR_PTR(-ENOENT);
		goto err_ns;
	}

	fdb = esw_vport_tbl_create(esw, ns);
	if (IS_ERR(fdb))
		goto err_ns;

	e->fdb = fdb;
	e->num_rules = 1;
	e->key = skey;
	hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return e->fdb;

err_ns:
	kfree(e);
err_alloc:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return fdb;
}

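/* Usage sketch (editorial, illustrative only): callers pair
 * esw_vport_tbl_get() with esw_vport_tbl_put() using the same attributes;
 * the table is refcounted per {chain, prio, vport} key and destroyed when
 * the last user drops it. "vport_num" below stands in for any valid vport
 * number:
 *
 *	struct mlx5_vport_tbl_attr attr = {
 *		.chain = 0,
 *		.prio = 1,
 *		.vport = vport_num,
 *	};
 *	struct mlx5_flow_table *fdb = esw_vport_tbl_get(esw, &attr);
 *
 *	if (IS_ERR(fdb))
 *		return PTR_ERR(fdb);
 *	... add rules to fdb, and later:
 *	esw_vport_tbl_put(esw, &attr);
 */
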
int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	int i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.vport = vport->vport;
		fdb = esw_vport_tbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	mlx5_esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;
	int i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.vport = vport->vport;
		esw_vport_tbl_put(esw, &attr);
	}
}

/* End: Per vport tables */

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr && attr->in_rep)
		spec->flow_context.flow_source =
			attr->in_rep->vport == MLX5_VPORT_UPLINK ?
				MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
				MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}

static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_attr *attr,
				  struct mlx5_eswitch *src_esw,
				  u16 vport)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because a vport is not represented by a
	 * single VHCA in dual-port RoCE mode, and matching on source vport
	 * may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		if (mlx5_esw_indir_table_decap_vport(attr))
			vport = mlx5_esw_indir_table_decap_vport(attr);
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(src_esw,
								   vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(src_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}
}

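/* Example (editorial, illustrative only): for a rule whose source is VF
 * vport 1, the two branches above reduce to roughly:
 *
 *   metadata mode: match misc_parameters_2.metadata_reg_c_0 against
 *                  mlx5_eswitch_get_vport_metadata_for_match(src_esw, 1)
 *                  under mlx5_eswitch_get_vport_metadata_mask()
 *   legacy mode:   match misc_parameters.source_port == 1, plus
 *                  source_eswitch_owner_vhca_id on a merged eswitch
 */
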
static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	ft = mlx5_esw_indir_table_get(esw, attr, spec,
				      mlx5_esw_indir_table_decap_vport(attr), true);
	return PTR_ERR_OR_ZERO(ft);
}

static void
esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
			struct mlx5_flow_attr *attr)
{
	if (mlx5_esw_indir_table_decap_vport(attr))
		mlx5_esw_indir_table_put(esw, attr,
					 mlx5_esw_indir_table_decap_vport(attr),
					 true);
}

static int
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
		  struct mlx5_flow_act *flow_act,
		  struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr,
		  struct mlx5_flow_spec *spec,
		  int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = attr->dest_ft;

	if (mlx5_esw_indir_table_decap_vport(attr))
		return esw_setup_decap_indir(esw, attr, spec);
	return 0;
}

static void
esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
			 struct mlx5_flow_act *flow_act,
			 struct mlx5_fs_chains *chains,
			 int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}

static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_fs_chains *chains,
		     u32 chain, u32 prio, u32 level,
		     int i)
{
	struct mlx5_flow_table *ft;

	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	ft = mlx5_chains_get_table(chains, chain, prio, level);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = ft;
	return 0;
}

static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
				     int from, int to)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int i;

	for (i = from; i < to; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			mlx5_chains_put_table(chains, 0, 1, 0);
		else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						     esw_attr->dests[i].mdev))
			mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
						 false);
}

static bool
esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
{
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			return true;
	return false;
}

static int
esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
				 struct mlx5_flow_act *flow_act,
				 struct mlx5_eswitch *esw,
				 struct mlx5_fs_chains *chains,
				 struct mlx5_flow_attr *attr,
				 int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
		if (err)
			goto err_setup_chain;
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat;
	}
	return 0;

err_setup_chain:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
					       struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
}

static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						esw_attr->dests[i].mdev))
			return true;
	return false;
}

static int
esw_setup_indir_table(struct mlx5_flow_destination *dest,
		      struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      struct mlx5_flow_spec *spec,
		      bool ignore_flow_lvl,
		      int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		if (ignore_flow_lvl)
			flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
						       esw_attr->dests[j].rep->vport, false);
		if (IS_ERR(dest[*i].ft)) {
			err = PTR_ERR(dest[*i].ft);
			goto err_indir_tbl_get;
		}
	}

	if (mlx5_esw_indir_table_decap_vport(attr)) {
		err = esw_setup_decap_indir(esw, attr, spec);
		if (err)
			goto err_indir_tbl_get;
	}

	return 0;

err_indir_tbl_get:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
	esw_cleanup_decap_indir(esw, attr);
}

static void
esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
{
	mlx5_chains_put_table(chains, chain, prio, level);
}

static void
esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		     int attr_idx, int dest_idx, bool pkt_reformat)
{
	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
	dest[dest_idx].vport.vhca_id =
		MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
		if (pkt_reformat) {
			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
		}
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
		dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
	}
}

static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		      int i)
{
	int j;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
		esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
	return i;
}

static int
esw_setup_dests(struct mlx5_flow_destination *dest,
		struct mlx5_flow_act *flow_act,
		struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr,
		struct mlx5_flow_spec *spec,
		int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int err = 0;

	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
	    MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) &&
	    mlx5_eswitch_vport_match_metadata_enabled(esw))
		attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;

	if (attr->dest_ft) {
		esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
		(*i)++;
	} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
		esw_setup_slow_path_dest(dest, flow_act, chains, *i);
		(*i)++;
	} else if (attr->dest_chain) {
		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
					   1, 0, *i);
		(*i)++;
	} else if (esw_is_indir_table(esw, attr)) {
		err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
	} else {
		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
	}

	return err;
}

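/* Note (editorial): esw_setup_dests() selects exactly one destination class,
 * in precedence order: an explicit destination table (attr->dest_ft), the
 * slow path tc-end table, a goto-chain destination (attr->dest_chain),
 * indirect tables, chain based source port rewrite, and finally plain vport
 * destinations. esw_cleanup_dests() below mirrors the same precedence.
 */
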
static void
esw_cleanup_dests(struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);

	if (attr->dest_ft) {
		esw_cleanup_decap_indir(esw, attr);
	} else if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
		if (attr->dest_chain)
			esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
		else if (esw_is_indir_table(esw, attr))
			esw_cleanup_indir_table(esw, attr);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			esw_cleanup_chain_src_port_rewrite(esw, attr);
	}
}

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = !!(esw_attr->split_count);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
		flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
		flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
			flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
			flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
		}
	}

	mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int err;

		err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_goto_table;
		}
	}

	if (esw_attr->decap_pkt_reformat)
		flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if (split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;

		fdb = esw_vport_tbl_get(esw, &fwd_attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_chains_get_table(chains, attr->chain,
						    attr->prio, 0);
		else
			fdb = attr->ft;

		if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr,
							  esw_attr->in_mdev->priv.eswitch,
							  esw_attr->in_rep->vport);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	return rule;

err_add_rule:
	if (split)
		esw_vport_tbl_put(esw, &fwd_attr);
	else if (attr->chain || attr->prio)
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
	esw_cleanup_dests(esw, attr);
err_create_goto_table:
	return rule;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i, err = 0;

	fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_attr.chain = attr->chain;
	fwd_attr.prio = attr->prio;
	fwd_attr.vport = esw_attr->in_rep->vport;
	fwd_fdb = esw_vport_tbl_get(esw, &fwd_attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < esw_attr->split_count; i++) {
		if (esw_is_indir_table(esw, attr))
			err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
							       &i);
		else
			esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);

		if (err) {
			rule = ERR_PTR(err);
			goto err_chain_src_rewrite;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr,
					  esw_attr->in_mdev->priv.eswitch,
					  esw_attr->in_rep->vport);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule)) {
		i = esw_attr->split_count;
		goto err_chain_src_rewrite;
	}

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
err_chain_src_rewrite:
	esw_put_dest_tables_loop(esw, attr, 0, i);
	esw_vport_tbl_put(esw, &fwd_attr);
err_get_fwd:
	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr,
			bool fwd_rule)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = (esw_attr->split_count > 0);
	struct mlx5_vport_tbl_attr fwd_attr;
	int i;

	mlx5_del_flow_rules(rule);

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule || split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
	}

	if (fwd_rule) {
		esw_vport_tbl_put(esw, &fwd_attr);
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
	} else {
		if (split)
			esw_vport_tbl_put(esw, &fwd_attr);
		else if (attr->chain || attr->prio)
			mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_cleanup_dests(esw, attr);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules without it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules without vlans (attr->vlan = 0) and with vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		 !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(esw_attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0],
						    0, SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = esw_attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
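
/* Usage sketch (editorial, illustrative only): representors typically use
 * this exported helper to steer traffic sent on one of their own send queues
 * (identified by sqn) back out through the matching vport:
 *
 *	flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, vport_num, sqn);
 *	if (IS_ERR(flow_rule))
 *		return PTR_ERR(flow_rule);
 *	... and on teardown:
 *	mlx5_eswitch_del_send_to_vport_rule(flow_rule);
 */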

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
	int i = 0, num_vfs = esw->esw_funcs.num_vfs, vport_num;

	if (!num_vfs || !flows)
		return;

	mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs)
		mlx5_del_flow_rules(flows[i++]);

	kvfree(flows);
}

static int
mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
	int num_vfs, vport_num, rule_idx = 0, err = 0;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_spec *spec;

	num_vfs = esw->esw_funcs.num_vfs;
	flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL);
	if (!flows)
		return -ENOMEM;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto alloc_err;
	}

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs) {
		MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
		dest.vport.num = vport_num;

		flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
						spec, &flow_act, &dest, 1);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule idx %d, err %ld\n",
				 rule_idx, PTR_ERR(flow_rule));
			goto rule_err;
		}
		flows[rule_idx++] = flow_rule;
	}

	esw->fdb_table.offloads.send_to_vport_meta_rules = flows;
	kvfree(spec);
	return 0;

rule_err:
	while (--rule_idx >= 0)
		mlx5_del_flow_rules(flows[rule_idx]);
	kvfree(spec);
alloc_err:
	kvfree(flows);
	return err;
}

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}

static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, min,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, min,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}

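/* Worked example (editorial, illustrative only): with metadata matching
 * enabled and reg_c_1 loopback supported, enable == true performs a
 * read-modify-write of the enable bits queried from firmware:
 *
 *	wanted = MLX5_FDB_TO_VPORT_REG_C_0 | MLX5_FDB_TO_VPORT_REG_C_1;
 *	curr  |= wanted;	(on disable, curr &= ~wanted instead)
 *
 * and then writes curr back via MODIFY_ESW_VPORT_CONTEXT with
 * field_select.fdb_to_vport_reg_c_id set.
 */
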
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}

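/* Usage note (editorial, illustrative only): a restore rule is created per
 * mapped chain tag. A packet that misses in hardware carries its tag in
 * reg_c_0; the rule above re-tags the packet, applies the restore copy
 * modify-header and forwards it to the offloads table so software can resume
 * classification:
 *
 *	flow_rule = esw_add_restore_rule(esw, tag);
 *	if (IS_ERR(flow_rule))
 *		return PTR_ERR(flow_rule);
 */
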
u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw)
{
	return ESW_CHAIN_TAG_METADATA_MASK;
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
#define fdb_modify_header_fwd_to_table_supported(esw) \
	(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
		*flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;

	if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
	} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
	} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
		/* Disabled when ttl workaround is needed, e.g
		 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
		 */
		esw_warn(dev,
			 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
	} else {
		*flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_info(dev, "Supported tc chains and prios offload\n");
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		*flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
}

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *nf_ft, *ft;
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	u32 fdb_max;
	int err;

	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_init_chains_offload_flags(esw, &attr.flags);
	attr.ns = MLX5_FLOW_NAMESPACE_FDB;
	attr.max_ft_sz = fdb_max;
	attr.max_grp_num = esw->params.large_group_num;
	attr.default_ft = miss_fdb;
	attr.max_restore_tag = esw_get_max_restore_tag(esw);

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains)) {
		err = PTR_ERR(chains);
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		return err;
	}

	esw->fdb_table.offloads.esw_chains_priv = chains;

	/* Create tc_end_ft which is the always created ft chain */
	nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
				      1, 0);
	if (IS_ERR(nf_ft)) {
		err = PTR_ERR(nf_ft);
		goto nf_ft_err;
	}

	/* Always open the root for fast path */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto level_0_err;
	}

	/* Open level 1 for split fdb rules now if prios isn't supported */
	if (!mlx5_chains_prios_supported(chains)) {
		err = mlx5_esw_vport_tbl_get(esw);
		if (err)
			goto level_1_err;
	}

	mlx5_chains_set_end_ft(chains, nf_ft);

	return 0;

level_1_err:
	mlx5_chains_put_table(chains, 0, 1, 0);
level_0_err:
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
nf_ft_err:
	mlx5_chains_destroy(chains);
	esw->fdb_table.offloads.esw_chains_priv = NULL;

	return err;
}

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
	if (!mlx5_chains_prios_supported(chains))
		mlx5_esw_vport_tbl_put(esw);
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
	mlx5_chains_destroy(chains);
}

#else /* CONFIG_MLX5_CLS_ACT */

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}

#endif

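/* Sizing note (editorial): the slow path FDB created below reserves room for
 * every rule class it hosts, mirroring the table_size computation in
 * esw_create_offloads_fdb_tables(). For example, with 4 total vports and
 * 2 VFs:
 *
 *	table_size = 4 * MAX_SQ_NVPORTS + MAX_PF_SQ	(send-to-vport SQ rules)
 *		   + MLX5_ESW_MISS_FLOWS		(unicast + multicast miss)
 *		   + 4					(peer miss rules, one per vport)
 *		   + 2;					(send-to-vport meta rules, one per VF)
 */
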
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	int num_vfs, table_size, ix, err = 0;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	u32 flags = 0, *flow_group_in;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		     MLX5_ESW_MISS_FLOWS + esw->total_vports + esw->esw_funcs.num_vfs;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	err = esw_chains_create(esw, fdb);
	if (err) {
		esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* meta send to vport */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

	num_vfs = esw->esw_funcs.num_vfs;
	if (num_vfs) {
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + num_vfs - 1);
		ix += num_vfs;

		g = mlx5_create_flow_group(fdb, flow_group_in);
		if (IS_ERR(g)) {
			err = PTR_ERR(g);
			esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
				 err);
			goto send_vport_meta_err;
		}
		esw->fdb_table.offloads.send_to_vport_meta_grp = g;

		err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
		if (err)
			goto meta_rule_err;
	}

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		/* create peer esw miss group */
		memset(flow_group_in, 0, inlen);

		esw_set_flow_group_source_port(esw, flow_group_in);
1751 /* create peer esw miss group */
1752 memset(flow_group_in, 0, inlen);
ac004b83 1753
6cec0229 1754 esw_set_flow_group_source_port(esw, flow_group_in);
a5641cb5 1755
6cec0229
MD
1756 if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1757 match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1758 flow_group_in,
1759 match_criteria);
ac004b83 1760
6cec0229
MD
1761 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1762 misc_parameters.source_eswitch_owner_vhca_id);
a5641cb5 1763
6cec0229
MD
1764 MLX5_SET(create_flow_group_in, flow_group_in,
1765 source_eswitch_owner_vhca_id_valid, 1);
1766 }
ac004b83 1767
6cec0229
MD
1768 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1769 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1770 ix + esw->total_vports - 1);
1771 ix += esw->total_vports;
ac004b83 1772
6cec0229
MD
1773 g = mlx5_create_flow_group(fdb, flow_group_in);
1774 if (IS_ERR(g)) {
1775 err = PTR_ERR(g);
1776 esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
1777 goto peer_miss_err;
1778 }
1779 esw->fdb_table.offloads.peer_miss_grp = g;
ac004b83 1780 }
ac004b83 1781
69697b6e
OG
1782 /* create miss group */
1783 memset(flow_group_in, 0, inlen);
f80be543
MB
1784 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1785 MLX5_MATCH_OUTER_HEADERS);
1786 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1787 match_criteria);
1788 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
1789 outer_headers.dmac_47_16);
1790 dmac[0] = 0x01;
69697b6e
OG
1791
1792 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
cd7e4186
BW
1793 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1794 ix + MLX5_ESW_MISS_FLOWS);
69697b6e
OG
1795
1796 g = mlx5_create_flow_group(fdb, flow_group_in);
1797 if (IS_ERR(g)) {
1798 err = PTR_ERR(g);
1799 esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
1800 goto miss_err;
1801 }
1802 esw->fdb_table.offloads.miss_grp = g;
1803
3aa33572
OG
1804 err = esw_add_fdb_miss_rule(esw);
1805 if (err)
1806 goto miss_rule_err;
1807
c88a026e 1808 kvfree(flow_group_in);
69697b6e
OG
1809 return 0;
1810
3aa33572
OG
1811miss_rule_err:
1812 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
69697b6e 1813miss_err:
6cec0229
MD
1814 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
1815 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
ac004b83 1816peer_miss_err:
8e404fef
VB
1817 mlx5_eswitch_del_send_to_vport_meta_rules(esw);
1818meta_rule_err:
1819 if (esw->fdb_table.offloads.send_to_vport_meta_grp)
1820 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
1821send_vport_meta_err:
69697b6e
OG
1822 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1823send_vport_err:
ae430332 1824 esw_chains_destroy(esw, esw_chains(esw));
39ac237c 1825fdb_chains_err:
52fff327 1826 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
1033665e 1827slow_fdb_err:
8463daf1
MG
1828 /* Holds true only as long as DMFS is the default */
1829 mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
69697b6e
OG
1830ns_err:
1831 kvfree(flow_group_in);
1832 return err;
1833}
1834
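
/*
 * Illustrative note (derived from esw_create_offloads_fdb_tables() above,
 * not authoritative): the slow path FDB index space is carved into
 * consecutive flow groups, in this order:
 *
 *   [0 .. total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ - 1]  send-to-vport
 *   [.. + num_vfs - 1]                                    send-to-vport meta
 *   [.. + total_vports - 1]                               peer miss (merged eswitch only)
 *   [.. + MLX5_ESW_MISS_FLOWS]                            unicast/multicast miss
 */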

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_eswitch_del_send_to_vport_meta_rules(esw);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	esw_chains_destroy(esw, esw_chains(esw));

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = esw->total_vports + MLX5_ESW_MISS_FLOWS;
	ft_attr.prio = 1;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int nvports;
	int err = 0;

	nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}
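
/*
 * Illustrative usage sketch (assumption, not driver code): a representor
 * datapath that wants RX traffic of a given vport steered to its own TIR
 * could call the helper above roughly like this (the tirn value and the
 * example function are hypothetical):
 */
#if 0
static struct mlx5_flow_handle *
example_add_rep_rx_rule(struct mlx5_eswitch *esw, u16 vport_num, u32 tirn)
{
	struct mlx5_flow_destination dest = {};

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;
	return mlx5_eswitch_create_vport_rx_rule(esw, vport_num, &dest);
}
#endif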

static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}

static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_modify_hdr *mod_hdr;
	void *match_criteria, *misc;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		err = -ENOMEM;
		goto out_free;
	}

	ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		esw_warn(esw->dev, "Failed to create restore table, err %d\n",
			 err);
		goto out_free;
	}

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    misc_parameters_2);

	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft_attr.max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create restore flow group, err: %d\n",
			 err);
		goto err_group;
	}

	MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, modact, src_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(copy_action_in, modact, dst_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	mod_hdr = mlx5_modify_header_alloc(esw->dev,
					   MLX5_FLOW_NAMESPACE_KERNEL, 1,
					   modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		esw_warn(dev, "Failed to create restore mod header, err: %d\n",
			 err);
		goto err_mod_hdr;
	}

	esw->offloads.ft_offloads_restore = ft;
	esw->offloads.restore_group = g;
	esw->offloads.restore_copy_hdr_id = mod_hdr;

	kvfree(flow_group_in);

	return 0;

err_mod_hdr:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(ft);
out_free:
	kvfree(flow_group_in);

	return err;
}
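
/*
 * Illustrative note (my reading of the code above, hedged): the restore
 * group matches the ESW_CHAIN_TAG_METADATA_MASK bits of reg_c_0, and the
 * allocated modify header copies REG_C_1 into REG_B. The assumed intent is
 * to make the steering metadata the hardware carried across the miss path
 * visible to software, since REG_B can be read back from the RX completion.
 */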

static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
					 esw->dev->priv.sriov.num_vfs);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = esw->total_vports;
	struct mlx5_eswitch_rep *rep;
	int vport_index;
	u8 rep_type;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_esw_for_all_reps(esw, vport_index, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
		rep->vport_index = vport_index;

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}

static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_sf_rep(esw, i, rep)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	__unload_reps_sf_vport(esw, rep_type);

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
		__esw_offloads_unload_rep(esw, rep, rep_type);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
				   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
			err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
			if (err)
				goto err_reps;
		}

	return 0;

err_reps:
	atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
	for (--rep_type; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	int err;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
	if (err)
		return err;

	err = mlx5_esw_offloads_rep_load(esw, vport_num);
	if (err)
		goto load_err;
	return err;

load_err:
	mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
	return err;
}

void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	mlx5_esw_offloads_rep_unload(esw, vport_num);
	mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
}
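
/*
 * Illustrative note (derived from the helpers above): rep_data[].state is a
 * small per-rep-type state machine driven with atomic_cmpxchg() so that
 * concurrent load/unload attempts cannot double-run the callbacks:
 *
 *   REP_UNREGISTERED -> REP_REGISTERED   (mlx5_eswitch_register_vport_reps)
 *   REP_REGISTERED   -> REP_LOADED       (mlx5_esw_offloads_rep_load)
 *   REP_LOADED       -> REP_REGISTERED   (__esw_offloads_unload_rep)
 */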

#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	mlx5e_tc_clean_fdb_peer_flows(esw);
#endif
	esw_del_fdb_peer_miss_rules(esw);
}

static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL);
		mlx5_flow_namespace_set_peer(peer_ns, NULL);
	}

	return 0;
}

static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	struct mlx5_eswitch *peer_esw = event_data;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);
err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}

static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static bool
esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
	    mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
	u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
	u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 1;
	u32 pf_num;
	int id;

	/* Only 4 bits of pf_num */
	pf_num = PCI_FUNC(esw->dev->pdev->devfn);
	if (pf_num > max_pf_num)
		return 0;

	/* Metadata is 4 bits of PFNUM and 12 bits of unique id */
	/* Use only non-zero vport_id (1-4095) for all PF's */
	id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL);
	if (id < 0)
		return 0;
	id = (pf_num << ESW_VPORT_BITS) | id;
	return id;
}

void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
{
	u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;

	/* Metadata contains only 12 bits of actual ida id */
	ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
}

static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
	vport->metadata = vport->default_metadata;
	return vport->metadata ? 0 : -ENOSPC;
}

static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport)
{
	if (!vport->default_metadata)
		return;

	WARN_ON(vport->metadata != vport->default_metadata);
	mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}

static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int i;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return;

	mlx5_esw_for_all_vports_reverse(esw, i, vport)
		esw_offloads_vport_metadata_cleanup(esw, vport);
}

static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err;
	int i;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	mlx5_esw_for_all_vports(esw, i, vport) {
		err = esw_offloads_vport_metadata_setup(esw, vport);
		if (err)
			goto metadata_err;
	}

	return 0;

metadata_err:
	esw_offloads_metadata_uninit(esw);
	return err;
}
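
/*
 * Illustrative sketch (assuming ESW_PFNUM_BITS == 4 and ESW_VPORT_BITS == 12,
 * per the comments above): the allocated metadata packs as
 *
 *   metadata = (pf_num << 12) | ida_id;   /\* ida_id in 1..4095 *\/
 *
 * e.g. pf_num 2 with id 7 yields 0x2007, and the ida id alone is recovered
 * with (metadata & 0xfff) when freeing.
 */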

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_acl_ingress_ofld_setup(esw, vport);
	if (err)
		return err;

	err = esw_acl_egress_ofld_setup(esw, vport);
	if (err)
		goto egress_err;

	return 0;

egress_err:
	esw_acl_ingress_ofld_cleanup(esw, vport);
	return err;
}

void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_acl_egress_ofld_cleanup(vport);
	esw_acl_ingress_ofld_cleanup(esw, vport);
}

static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	return esw_vport_create_offloads_acl_tables(esw, vport);
}

static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	esw_vport_destroy_offloads_acl_tables(esw, vport);
}

static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_indir_table *indir;
	int err;

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
	mutex_init(&esw->fdb_table.offloads.vports.lock);
	hash_init(esw->fdb_table.offloads.vports.table);

	indir = mlx5_esw_indir_table_init();
	if (IS_ERR(indir)) {
		err = PTR_ERR(indir);
		goto create_indir_err;
	}
	esw->fdb_table.offloads.indir = indir;

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
		goto create_acl_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_offloads_err;

	err = esw_create_restore_table(esw);
	if (err)
		goto create_restore_err;

	err = esw_create_offloads_fdb_tables(esw);
	if (err)
		goto create_fdb_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	return 0;

create_fg_err:
	esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
	esw_destroy_restore_table(esw);
create_restore_err:
	esw_destroy_offloads_table(esw);
create_offloads_err:
	esw_destroy_uplink_offloads_acl_tables(esw);
create_acl_err:
	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
create_indir_err:
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
	return err;
}

static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_restore_table(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_uplink_offloads_acl_tables(esw);
	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
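
/*
 * Illustrative note (observation, not a stated contract): steering bring-up
 * above is strictly ordered -- indirection table, uplink ACLs, offloads
 * table, restore table, FDB tables, vport RX group -- and the error labels
 * and esw_offloads_steering_cleanup() tear everything down in exact reverse.
 */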

static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	bool host_pf_disabled;
	u16 new_num_vfs;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	} else {
		int err;

		err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
						  MLX5_VPORT_UC_ADDR_CHANGE);
		if (err)
			return;
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
}

static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}

static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
{
	const u32 *query_host_out;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
		return 0;

	query_host_out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(query_host_out))
		return PTR_ERR(query_host_out);

	/* Mark non local controller with non zero controller number. */
	esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
					     host_params_context.host_number);
	kvfree(query_host_out);
	return 0;
}

int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err, i;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	mutex_init(&esw->offloads.termtbl_mutex);
	mlx5_rdma_enable_roce(esw->dev);

	err = mlx5_esw_host_number_init(esw);
	if (err)
		goto err_metadata;

	if (esw_check_vport_match_metadata_supported(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	err = esw_offloads_metadata_init(esw);
	if (err)
		goto err_metadata;

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	/* Uplink vport rep must load first. */
	err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
	if (err)
		goto err_uplink;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	esw_offloads_devcom_init(esw);

	return 0;

err_vports:
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	esw_offloads_metadata_uninit(esw);
err_metadata:
	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
					 MLX5_ESWITCH_IGNORE_NUM_VFS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}

void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_devcom_cleanup(esw);
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	esw_offloads_metadata_uninit(esw);
	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
{
	/* devlink commands in NONE eswitch mode are currently supported only
	 * on ECPF.
	 */
	return (esw->mode == MLX5_ESWITCH_NONE &&
		!mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	u16 cur_mlx5_mode, mlx5_mode = 0;
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	mutex_lock(&esw->mode_lock);
	cur_mlx5_mode = esw->mode;
	if (cur_mlx5_mode == mlx5_mode)
		goto unlock;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		err = esw_offloads_start(esw, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		err = esw_offloads_stop(esw, extack);
	else
		err = -EINVAL;

unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	err = esw_mode_to_devlink(esw->mode, mode);
unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}
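
/*
 * Illustrative usage (the PCI address below is hypothetical): the two
 * callbacks above back the devlink eswitch mode commands, e.g.
 *
 *   devlink dev eswitch set pci/0000:03:00.0 mode switchdev
 *   devlink dev eswitch show pci/0000:03:00.0
 */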

int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err, vport, num_vport;
	struct mlx5_eswitch *esw;
	u8 mlx5_mode;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto out;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			goto out;
		fallthrough;
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		err = -EOPNOTSUPP;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		err = -EOPNOTSUPP;
		goto out;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	mutex_unlock(&esw->mode_lock);
	return 0;

revert_inline_mode:
	num_vport = --vport;
	mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	mutex_unlock(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}
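
/*
 * Illustrative usage (the PCI address below is hypothetical): the inline
 * mode knob above can only be changed while no offloaded flows exist, e.g.
 *
 *   devlink dev eswitch set pci/0000:03:00.0 inline-mode transport
 */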

int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		goto unlock;
	}

	if (esw->offloads.encap == encap)
		goto unlock;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw);
	}

unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	*encap = esw->offloads.encap;
unlock:
	mutex_unlock(&esw->mode_lock);
	return 0;
}
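
/*
 * Illustrative usage (PCI address hypothetical; exact option spelling
 * depends on the iproute2 version): in switchdev mode the setter above
 * re-creates the slow path FDB with or without the reformat/decap flags,
 * e.g.
 *
 *   devlink dev eswitch set pci/0000:03:00.0 encap-mode basic
 */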

static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
	/* Currently, only ECPF based device has representor for host PF. */
	if (vport_num == MLX5_VPORT_PF &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	if (vport_num == MLX5_VPORT_ECPF &&
	    !mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	int i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_all_reps(esw, i, rep) {
		if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
			rep_data = &rep->rep_data[rep_type];
			atomic_set(&rep_data->state, REP_REGISTERED);
		}
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_all_reps(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
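
/*
 * Illustrative sketch (hypothetical callbacks, call sequence only): an
 * upper driver such as the Ethernet representor code registers one ops
 * table per rep type, assuming example_rep_load()/example_rep_unload()/
 * example_rep_get_proto_dev() are defined by that driver:
 */
#if 0
static const struct mlx5_eswitch_rep_ops example_rep_ops = {
	.load = example_rep_load,			/* hypothetical */
	.unload = example_rep_unload,			/* hypothetical */
	.get_proto_dev = example_rep_get_proto_dev,	/* hypothetical */
};

mlx5_eswitch_register_vport_reps(esw, &example_rep_ops, REP_ETH);
/* ... and on driver teardown ... */
mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
#endif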
a4b97ab4 3169void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
726293f1 3170{
726293f1
HHZ
3171 struct mlx5_eswitch_rep *rep;
3172
879c8f84 3173 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
8693115a 3174 return rep->rep_data[rep_type].priv;
726293f1 3175}
22215908
MB
3176
3177void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
02f3afd9 3178 u16 vport,
22215908
MB
3179 u8 rep_type)
3180{
22215908
MB
3181 struct mlx5_eswitch_rep *rep;
3182
879c8f84 3183 rep = mlx5_eswitch_get_rep(esw, vport);
22215908 3184
8693115a
PP
3185 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
3186 esw->offloads.rep_ops[rep_type]->get_proto_dev)
3187 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
22215908
MB
3188 return NULL;
3189}
57cbd893 3190EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
22215908
MB
3191
3192void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
3193{
879c8f84 3194 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
22215908 3195}
57cbd893
MB
3196EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
3197
3198struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
02f3afd9 3199 u16 vport)
57cbd893 3200{
879c8f84 3201 return mlx5_eswitch_get_rep(esw, vport);
57cbd893
MB
3202}
3203EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
91d6291c
PP
3204

bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num >= MLX5_VPORT_FIRST_VF &&
	       vport_num <= esw->dev->priv.sriov.max_vfs;
}

bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
}
EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);

bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);

u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
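
/*
 * Illustrative note (assuming ESW_SOURCE_PORT_METADATA_BITS == 16, i.e.
 * ESW_PFNUM_BITS + ESW_VPORT_BITS): the match value above is the per-vport
 * metadata shifted into the upper half of reg_c_0, e.g. metadata 0x2007 is
 * matched as 0x2007 << 16 = 0x20070000, leaving the lower bits of reg_c_0
 * free for chain tags.
 */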

int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 sfnum)
{
	int err;

	err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		return err;

	err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, sfnum);
	if (err)
		goto devlink_err;

	err = mlx5_esw_offloads_rep_load(esw, vport_num);
	if (err)
		goto rep_err;
	return 0;

rep_err:
	mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
devlink_err:
	mlx5_esw_vport_disable(esw, vport_num);
	return err;
}

void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
	mlx5_esw_offloads_rep_unload(esw, vport_num);
	mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport_num);
}

static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	*vhca_id = 0;
	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
	    !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		return -EPERM;

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	*vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);

out_free:
	kfree(query_ctx);
	return err;
}

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *old_entry, *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err) {
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
			 vport_num, err);
		return err;
	}

	vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
	if (!vhca_map_entry)
		return -ENOMEM;

	*vhca_map_entry = vport_num;
	old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
	if (xa_is_err(old_entry)) {
		kfree(vhca_map_entry);
		return xa_err(old_entry);
	}
	kfree(old_entry);
	return 0;
}

void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err)
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
			 vport_num, err);

	vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
	kfree(vhca_map_entry);
}

int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
{
	u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);

	if (!res)
		return -ENOENT;

	*vport_num = *res;
	return 0;
}
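
/*
 * Illustrative note (derived from the functions above): offloads.vhca_map
 * is an xarray keyed by vhca_id whose values are heap-allocated vport
 * numbers, which is why set/clear kmalloc()/kfree() the u16 entries and
 * lookup is a plain xa_load().
 */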

u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
					    u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata;
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);