/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/acl/ofld.h"
#include "esw/chains.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

/* Per vport tables */

#define MLX5_ESW_VPORT_TABLE_SIZE 128

/* This struct is used as a key to the hash table and we need it to be packed
 * so hash result is consistent
 */
struct mlx5_vport_key {
	u32 chain;
	u16 prio;
	u16 vport;
	u16 vhca_id;
} __packed;

struct mlx5_vport_table {
	struct hlist_node hlist;
	struct mlx5_flow_table *fdb;
	u32 num_rules;
	struct mlx5_vport_key key;
};

#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4
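
/* Create a per-vport FDB table in the FDB_PER_VPORT prio, sized for
 * MLX5_ESW_VPORT_TABLE_SIZE rules spread over auto-created flow groups.
 */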
static struct mlx5_flow_table *
esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *fdb;

	ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
	ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
	ft_attr.prio = FDB_PER_VPORT;
	fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
			 PTR_ERR(fdb));
	}

	return fdb;
}

static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
				  struct mlx5_esw_flow_attr *attr,
				  struct mlx5_vport_key *key)
{
	key->vport = attr->in_rep->vport;
	key->chain = attr->chain;
	key->prio = attr->prio;
	key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	return jhash(key, sizeof(*key), 0);
}

/* caller must hold vports.lock */
static struct mlx5_vport_table *
esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
{
	struct mlx5_vport_table *e;

	hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
		if (!memcmp(&e->key, skey, sizeof(*skey)))
			return e;

	return NULL;
}

static void
esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_vport_table *e;
	struct mlx5_vport_key key;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &key);
	e = esw_vport_tbl_lookup(esw, &key, hkey);
	if (!e || --e->num_rules)
		goto out;

	hash_del(&e->hlist);
	mlx5_destroy_flow_table(e->fdb);
	kfree(e);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
}
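
/* Find the per-vport table matching @attr and take a reference on it,
 * creating the table on first use. Pairs with esw_vport_tbl_put().
 */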
static struct mlx5_flow_table *
esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport_table *e;
	struct mlx5_vport_key skey;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &skey);
	e = esw_vport_tbl_lookup(esw, &skey, hkey);
	if (e) {
		e->num_rules++;
		goto out;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		fdb = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB namespace\n");
		fdb = ERR_PTR(-ENOENT);
		goto err_ns;
	}

	fdb = esw_vport_tbl_create(esw, ns);
	if (IS_ERR(fdb))
		goto err_ns;

	e->fdb = fdb;
	e->num_rules = 1;
	e->key = skey;
	hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return e->fdb;

err_ns:
	kfree(e);
err_alloc:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return fdb;
}

int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_flow_attr attr = {};
	struct mlx5_eswitch_rep rep = {};
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	int i;

	attr.prio = 1;
	attr.in_rep = &rep;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.in_rep->vport = vport->vport;
		fdb = esw_vport_tbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	mlx5_esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_flow_attr attr = {};
	struct mlx5_eswitch_rep rep = {};
	struct mlx5_vport *vport;
	int i;

	attr.prio = 1;
	attr.in_rep = &rep;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.in_rep->vport = vport->vport;
		esw_vport_tbl_put(esw, &attr);
	}
}

/* End: Per vport tables */
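
/* Map a vport number to its entry in the offloads representor array. */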
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static bool
esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
				   const struct mlx5_vport *vport)
{
	return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
		mlx5_eswitch_is_vf_vport(esw, vport->vport));
}

static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
								   attr->in_rep->vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}

	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr->in_rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}
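
/* Add an offloaded rule to the FDB: build the destination list (chain jump,
 * flow table or vport(s)), apply VLAN/encap/mod-hdr actions, set the source
 * port match and insert the rule into the chosen FDB table.
 */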
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		struct mlx5_flow_table *ft;

		if (attr->dest_ft) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = attr->dest_ft;
			i++;
		} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
			i++;
		} else if (attr->dest_chain) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
						       1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.pkt_reformat =
						attr->dests[j].pkt_reformat;
				}
				i++;
			}
		}
	}

	if (attr->decap_pkt_reformat)
		flow_act.pkt_reformat = attr->decap_pkt_reformat;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if (split) {
		fdb = esw_vport_tbl_get(esw, attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_esw_chains_get_table(esw, attr->chain,
							attr->prio, 0);
		else
			fdb = attr->fdb;

		if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	return rule;

err_add_rule:
	if (split)
		esw_vport_tbl_put(esw, attr);
	else if (attr->chain || attr->prio)
		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_esw_get:
	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
		mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}
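
/* Add the first half of a split rule: match in the chain table, mirror to
 * the first split_count vport destinations and forward to the per-vport
 * table where the second half of the rule is installed.
 */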
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i;

	fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_vport_tbl_get(esw, attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
add_err:
	esw_vport_tbl_put(esw, attr);
err_get_fwd:
	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}
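
/* Common teardown for offloaded and fwd rules: delete the rule, release any
 * termination tables and drop the chain/per-vport table references.
 */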
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);
	int i;

	mlx5_del_flow_rules(rule);

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule) {
		esw_vport_tbl_put(esw, attr);
		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
	} else {
		if (split)
			esw_vport_tbl_put(esw, attr);
		else if (attr->chain || attr->prio)
			mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
						  0);
		if (attr->dest_chain)
			mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		  !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}
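
/* Add a rule matching (source SQ, esw manager port) that sends traffic
 * originating from the given SQ to the vport representor.
 */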
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}

static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, min,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, min,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}
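
/* Prepare the match spec and destination shared by all peer miss rules:
 * traffic sourced at the peer e-switch is sent to the peer esw manager.
 */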
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}
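
/* Add one miss rule per function vport (PF, ECPF, VFs) steering traffic
 * sourced at the peer e-switch to the peer esw manager vport.
 */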
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}
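
/* Add the two slow path miss rules, one for unicast and one for multicast
 * dmac, forwarding otherwise unmatched traffic to the esw manager vport.
 */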
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}
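
/* Add a restore rule matching the given chain tag in reg_c_0: restore the
 * tag into the flow tag, copy reg_c_1 to reg_b and forward to the offloads
 * table.
 */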
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}

u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw)
{
	return ESW_CHAIN_TAG_METADATA_MASK;
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}
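
/* Create the slow path FDB table and its flow groups (send-to-vport, peer
 * e-switch miss, match-all miss), then add the miss rules.
 */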
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	u32 flags = 0, *flow_group_in;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		     MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	err = mlx5_esw_chains_create(esw);
	if (err) {
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_esw_chains_destroy(esw);
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_esw_chains_destroy(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
	ft_attr.prio = 1;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}
1489
1490static int esw_create_restore_table(struct mlx5_eswitch *esw)
1491{
d65dbedf 1492 u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
11b717d6
PB
1493 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1494 struct mlx5_flow_table_attr ft_attr = {};
1495 struct mlx5_core_dev *dev = esw->dev;
1496 struct mlx5_flow_namespace *ns;
6724e66b 1497 struct mlx5_modify_hdr *mod_hdr;
11b717d6
PB
1498 void *match_criteria, *misc;
1499 struct mlx5_flow_table *ft;
1500 struct mlx5_flow_group *g;
1501 u32 *flow_group_in;
1502 int err = 0;
1503
60acc105
PB
1504 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
1505 return 0;
1506
11b717d6
PB
1507 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
1508 if (!ns) {
1509 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
1510 return -EOPNOTSUPP;
1511 }
1512
1513 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1514 if (!flow_group_in) {
1515 err = -ENOMEM;
1516 goto out_free;
1517 }
1518
1519 ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
1520 ft = mlx5_create_flow_table(ns, &ft_attr);
1521 if (IS_ERR(ft)) {
1522 err = PTR_ERR(ft);
1523 esw_warn(esw->dev, "Failed to create restore table, err %d\n",
1524 err);
1525 goto out_free;
1526 }
1527
1528 memset(flow_group_in, 0, inlen);
1529 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1530 match_criteria);
1531 misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
1532 misc_parameters_2);
1533
1534 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1535 ESW_CHAIN_TAG_METADATA_MASK);
1536 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1537 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1538 ft_attr.max_fte - 1);
1539 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1540 MLX5_MATCH_MISC_PARAMETERS_2);
1541 g = mlx5_create_flow_group(ft, flow_group_in);
1542 if (IS_ERR(g)) {
1543 err = PTR_ERR(g);
1544 esw_warn(dev, "Failed to create restore flow group, err: %d\n",
1545 err);
1546 goto err_group;
1547 }
1548
6724e66b
PB
1549 MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
1550 MLX5_SET(copy_action_in, modact, src_field,
1551 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
1552 MLX5_SET(copy_action_in, modact, dst_field,
1553 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
1554 mod_hdr = mlx5_modify_header_alloc(esw->dev,
1555 MLX5_FLOW_NAMESPACE_KERNEL, 1,
1556 modact);
1557 if (IS_ERR(mod_hdr)) {
e9864539 1558 err = PTR_ERR(mod_hdr);
6724e66b
PB
1559 esw_warn(dev, "Failed to create restore mod header, err: %d\n",
1560 err);
6724e66b
PB
1561 goto err_mod_hdr;
1562 }
1563
11b717d6
PB
1564 esw->offloads.ft_offloads_restore = ft;
1565 esw->offloads.restore_group = g;
6724e66b 1566 esw->offloads.restore_copy_hdr_id = mod_hdr;
11b717d6 1567
c8508713
RD
1568 kvfree(flow_group_in);
1569
11b717d6
PB
1570 return 0;
1571
6724e66b
PB
1572err_mod_hdr:
1573 mlx5_destroy_flow_group(g);
11b717d6
PB
1574err_group:
1575 mlx5_destroy_flow_table(ft);
1576out_free:
1577 kvfree(flow_group_in);
1578
1579 return err;
cc617ced
PP
1580}

static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	if (esw->mode != MLX5_ESWITCH_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
					 esw->dev->priv.sriov.num_vfs);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = esw->total_vports;
	struct mlx5_eswitch_rep *rep;
	int vport_index;
	u8 rep_type;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_esw_for_all_reps(esw, vport_index, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
		rep->vport_index = vport_index;

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}

static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
		__esw_offloads_unload_rep(esw, rep, rep_type);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
				   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
			err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
			if (err)
				goto err_reps;
		}

	return 0;

err_reps:
	atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
	for (--rep_type; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}
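
/* Representor lifecycle, as implemented by the helpers above:
 *
 *   REP_UNREGISTERED --register--> REP_REGISTERED --load--> REP_LOADED
 *                                       ^                       |
 *                                       +--------unload---------+
 *
 * The atomic_cmpxchg() on rep_data[].state makes load and unload
 * idempotent: a rep is only loaded from REP_REGISTERED and only
 * unloaded from REP_LOADED, so repeated or racing calls fall through
 * harmlessly.
 */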

#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	mlx5e_tc_clean_fdb_peer_flows(esw);
#endif
	esw_del_fdb_peer_miss_rules(esw);
}

static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL);
		mlx5_flow_namespace_set_peer(peer_ns, NULL);
	}

	return 0;
}
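
/* Peer namespace setup is symmetric: each FDB root namespace is pointed
 * at the other, and a failure on the second set_peer() unwinds the
 * first. The same two-sided shape repeats in the devcom event handler
 * below, where mlx5_esw_offloads_pair() runs once per direction.
 */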

static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	struct mlx5_eswitch *peer_esw = event_data;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);
err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}

static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
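
/* Both eswitches must agree on whether source-port metadata is in use
 * before they pair (checked at the top of the PAIR event above), and
 * pairing is only attempted at all when the merged_eswitch capability
 * is present.
 */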

static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* For prio tag mode, there is only one FTE:
	 * 1) Untagged packets - push prio tag VLAN and modify metadata if
	 *    required, allow
	 * Unmatched traffic is allowed by default
	 */
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Untagged packets - push prio tag VLAN, allow */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.vlan[0].ethtype = ETH_P_8021Q;
	flow_act.vlan[0].vid = 0;
	flow_act.vlan[0].prio = 0;

	if (vport->ingress.offloads.modify_metadata_rule) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	}

	vport->ingress.allow_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
	}

	kvfree(spec);
	return err;
}

static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
						     struct mlx5_vport *vport)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_flow_act flow_act = {};
	int err = 0;
	u32 key;

	key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
	key >>= ESW_SOURCE_PORT_METADATA_OFFSET;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
	MLX5_SET(set_action_in, action, data, key);
	MLX5_SET(set_action_in, action, offset,
		 ESW_SOURCE_PORT_METADATA_OFFSET);
	MLX5_SET(set_action_in, action, length,
		 ESW_SOURCE_PORT_METADATA_BITS);

	vport->ingress.offloads.modify_metadata =
		mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
					 1, action);
	if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata);
		esw_warn(esw->dev,
			 "failed to alloc modify header for vport %d ingress acl (%d)\n",
			 vport->vport, err);
		return err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	vport->ingress.offloads.modify_metadata_rule =
		mlx5_add_flow_rules(vport->ingress.acl,
				    NULL, &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
		esw_warn(esw->dev,
			 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
			 vport->vport, err);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
	return err;
}
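
/* The SET action programmed above writes the same source-port key that
 * mlx5_eswitch_get_vport_metadata_for_match() produces (pre-shifted by
 * ESW_SOURCE_PORT_METADATA_OFFSET) into REG_C_0, so FDB rules can later
 * identify the sending vport by matching REG_C_0 against that helper's
 * return value.
 */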

static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
						      struct mlx5_vport *vport)
{
	if (vport->ingress.offloads.modify_metadata_rule) {
		mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);

		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
}

static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	u32 flow_index = 0;
	int ret = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
		/* This group is to hold FTE to match untagged packets when prio_tag
		 * is enabled.
		 */
		memset(flow_group_in, 0, inlen);

		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in, match_criteria);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
				 vport->vport, ret);
			goto prio_tag_err;
		}
		vport->ingress.offloads.metadata_prio_tag_grp = g;
		flow_index++;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		/* This group holds an FTE with no matches for add metadata for
		 * tagged packets, if prio-tag is enabled (as a fallthrough),
		 * or all traffic in case prio-tag is disabled.
		 */
		memset(flow_group_in, 0, inlen);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
				 vport->vport, ret);
			goto metadata_err;
		}
		vport->ingress.offloads.metadata_allmatch_grp = g;
	}

	kvfree(flow_group_in);
	return 0;

metadata_err:
	if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
prio_tag_err:
	kvfree(flow_group_in);
	return ret;
}

static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
{
	if (vport->ingress.offloads.metadata_allmatch_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
		vport->ingress.offloads.metadata_allmatch_grp = NULL;
	}

	if (vport->ingress.offloads.metadata_prio_tag_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
}

static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	int num_ftes = 0;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	    !esw_check_ingress_prio_tag_enabled(esw, vport))
		return 0;

	esw_vport_cleanup_ingress_rules(esw, vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
		num_ftes++;
	if (esw_check_ingress_prio_tag_enabled(esw, vport))
		num_ftes++;

	err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes);
	if (err) {
		esw_warn(esw->dev,
			 "failed to enable ingress acl (%d) on vport[%d]\n",
			 err, vport->vport);
		return err;
	}

	err = esw_vport_create_ingress_acl_group(esw, vport);
	if (err)
		goto group_err;

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules\n", vport->vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
		if (err)
			goto metadata_err;
	}

	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
		err = esw_vport_ingress_prio_tag_config(esw, vport);
		if (err)
			goto prio_tag_err;
	}
	return 0;

prio_tag_err:
	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
metadata_err:
	esw_vport_destroy_ingress_acl_group(vport);
group_err:
	esw_vport_destroy_ingress_acl_table(vport);
	return err;
}

static bool
esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
	    mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

static bool
esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
{
	return mlx5_core_mp_enabled(esw->dev);
}

static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
{
	return esw_check_vport_match_metadata_mandatory(esw) &&
	       esw_check_vport_match_metadata_supported(esw);
}

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_vport_ingress_config(esw, vport);
	if (err)
		return err;

	if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
		err = esw_acl_egress_ofld_setup(esw, vport);
		if (err) {
			esw_vport_cleanup_ingress_rules(esw, vport);
			esw_vport_del_ingress_acl_modify_metadata(esw, vport);
			esw_vport_destroy_ingress_acl_group(vport);
			esw_vport_destroy_ingress_acl_table(vport);
		}
	}
	return err;
}

void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_acl_egress_ofld_cleanup(vport);
	esw_vport_cleanup_ingress_rules(esw, vport);
	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
	esw_vport_destroy_ingress_acl_group(vport);
	esw_vport_destroy_ingress_acl_table(vport);
}

static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err;

	if (esw_use_vport_metadata(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	err = esw_vport_create_offloads_acl_tables(esw, vport);
	if (err)
		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
	return err;
}

static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	esw_vport_destroy_offloads_acl_tables(esw, vport);
	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}

static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	int num_vfs = esw->esw_funcs.num_vfs;
	int total_vports;
	int err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		total_vports = esw->total_vports;
	else
		total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
	mutex_init(&esw->fdb_table.offloads.vports.lock);
	hash_init(esw->fdb_table.offloads.vports.table);

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
		goto create_acl_err;

	err = esw_create_offloads_table(esw, total_vports);
	if (err)
		goto create_offloads_err;

	err = esw_create_restore_table(esw);
	if (err)
		goto create_restore_err;

	err = esw_create_offloads_fdb_tables(esw, total_vports);
	if (err)
		goto create_fdb_err;

	err = esw_create_vport_rx_group(esw, total_vports);
	if (err)
		goto create_fg_err;

	return 0;

create_fg_err:
	esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
	esw_destroy_restore_table(esw);
create_restore_err:
	esw_destroy_offloads_table(esw);
create_offloads_err:
	esw_destroy_uplink_offloads_acl_tables(esw);
create_acl_err:
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
	return err;
}

static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_restore_table(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_uplink_offloads_acl_tables(esw);
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
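
/* Teardown runs strictly in reverse creation order: rx group, FDB
 * tables, restore table, offloads table, then the uplink ACL tables,
 * mirroring the goto-unwind chain in esw_offloads_steering_init().
 */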

static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	bool host_pf_disabled;
	u16 new_num_vfs;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	} else {
		int err;

		err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
						  MLX5_VPORT_UC_ADDR_CHANGE);
		if (err)
			return;
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
}

static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}
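
/* The notifier above runs from an atomic notification context (hence the
 * GFP_ATOMIC allocation), so the firmware query and any vport load or
 * unload are deferred to esw->work_queue via the work item it allocates.
 */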

int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err, i;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	mutex_init(&esw->offloads.termtbl_mutex);
	mlx5_rdma_enable_roce(esw->dev);

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	/* Uplink vport rep must load first. */
	err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
	if (err)
		goto err_uplink;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	esw_offloads_devcom_init(esw);

	return 0;

err_vports:
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
					 MLX5_ESWITCH_IGNORE_NUM_VFS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}

void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_devcom_cleanup(esw);
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
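
/* Illustrative sketch (not part of the driver): the two translators above
 * are exact inverses, so any accepted devlink mode survives a round trip.
 * The helper below only documents that property; its name is hypothetical.
 */
static inline bool example_esw_mode_round_trips(u16 devlink_mode)
{
	u16 mlx5_mode, back;

	if (esw_mode_from_devlink(devlink_mode, &mlx5_mode))
		return false;	/* mode unknown to the translator */
	if (esw_mode_to_devlink(mlx5_mode, &back))
		return false;
	return back == devlink_mode;
}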

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
{
	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	return 0;
}

static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
{
	/* devlink commands in NONE eswitch mode are currently supported only
	 * on ECPF.
	 */
	return (esw->mode == MLX5_ESWITCH_NONE &&
		!mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	cur_mlx5_mode = esw->mode;

	if (cur_mlx5_mode == mlx5_mode)
		goto unlock;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		err = esw_offloads_start(esw, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		err = esw_offloads_stop(esw, extack);
	else
		err = -EINVAL;

unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}
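
/* Usage sketch (illustrative, not part of the file): the setter above is
 * what the devlink tool reaches via the driver's devlink ops, e.g.
 *
 *   devlink dev eswitch set pci/0000:06:00.0 mode switchdev
 *   devlink dev eswitch show pci/0000:06:00.0
 *
 * The PCI address is a placeholder; "mode legacy" takes the reverse path
 * through esw_offloads_stop().
 */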

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(dev->priv.eswitch);
	if (err)
		goto unlock;

	err = esw_mode_to_devlink(esw->mode, mode);
unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport, num_vport;
	u8 mlx5_mode;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto out;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			goto out;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		err = -EOPNOTSUPP;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		err = -EOPNOTSUPP;
		goto out;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	mutex_unlock(&esw->mode_lock);
	return 0;

revert_inline_mode:
	num_vport = --vport;
	mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	mutex_unlock(&esw->mode_lock);
	return err;
}
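
/* On a mid-loop failure the revert path walks the already-updated vports
 * in reverse ("num_vport = --vport" points back at the last vport that
 * was successfully modified) and restores the previous inline mode.
 */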

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		goto unlock;
	}

	if (esw->offloads.encap == encap)
		goto unlock;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw, esw->nvports);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
	}

unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	*encap = esw->offloads.encap;
unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}

static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
	/* Currently, only ECPF based device has representor for host PF. */
	if (vport_num == MLX5_VPORT_PF &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	if (vport_num == MLX5_VPORT_ECPF &&
	    !mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	int i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_all_reps(esw, i, rep) {
		if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
			rep_data = &rep->rep_data[rep_type];
			atomic_set(&rep_data->state, REP_REGISTERED);
		}
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_all_reps(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);

bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num >= MLX5_VPORT_FIRST_VF &&
	       vport_num <= esw->dev->priv.sriov.max_vfs;
}

bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
}
EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);

bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);

u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	u32 vport_num_mask = GENMASK(ESW_VPORT_BITS - 1, 0);
	u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
	u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	u32 val;

	/* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */
	WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));

	/* Trim vhca_id to ESW_VHCA_ID_BITS */
	vhca_id &= vhca_id_mask;

	/* Make sure pf and ecpf map to end of ESW_VPORT_BITS range so they
	 * don't overlap with VF numbers, and themselves, after trimming.
	 */
	WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) <
		     vport_num_mask - 1);
	WARN_ON_ONCE((MLX5_VPORT_ECPF & vport_num_mask) <
		     vport_num_mask - 1);
	WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) ==
		     (MLX5_VPORT_ECPF & vport_num_mask));

	/* Make sure that the VF vport_num fits ESW_VPORT_BITS and doesn't
	 * overlap with pf and ecpf.
	 */
	if (vport_num != MLX5_VPORT_UPLINK &&
	    vport_num != MLX5_VPORT_ECPF)
		WARN_ON_ONCE(vport_num >= vport_num_mask - 1);

	/* We can now trim vport_num to ESW_VPORT_BITS */
	vport_num &= vport_num_mask;

	val = (vhca_id << ESW_VPORT_BITS) | vport_num;
	return val << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
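
/* Illustrative sketch (not part of the driver): inverting the packing done
 * above. The returned word is
 *   ((vhca_id << ESW_VPORT_BITS) | vport_num) << (32 - ESW_SOURCE_PORT_METADATA_BITS)
 * so shifting the metadata back down and masking recovers both fields.
 * The helper name is hypothetical.
 */
static inline void example_unpack_vport_metadata(u32 metadata,
						 u16 *vhca_id, u16 *vport_num)
{
	u32 key = metadata >> (32 - ESW_SOURCE_PORT_METADATA_BITS);

	*vport_num = key & GENMASK(ESW_VPORT_BITS - 1, 0);
	*vhca_id = (key >> ESW_VPORT_BITS) & GENMASK(ESW_VHCA_ID_BITS - 1, 0);
}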