]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
net/mlx5: E-switch, Reuse total_vports and avoid duplicate nvports
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / mellanox / mlx5 / core / eswitch_offloads.c
CommitLineData
69697b6e
OG
1/*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/etherdevice.h>
133dcfc5 34#include <linux/idr.h>
69697b6e
OG
35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/mlx5_ifc.h>
37#include <linux/mlx5/vport.h>
38#include <linux/mlx5/fs.h>
39#include "mlx5_core.h"
40#include "eswitch.h"
ea651a86 41#include "esw/acl/ofld.h"
49964352 42#include "esw/chains.h"
80f09dfc 43#include "rdma.h"
e52c2802
PB
44#include "en.h"
45#include "fs_core.h"
ac004b83 46#include "lib/devcom.h"
a3888f33 47#include "lib/eq.h"
69697b6e 48
cd7e4186
BW
/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)

#define UPLINK_REP_INDEX 0

/* Per vport tables */

/* Max number of flow entries in one per-vport FDB table. */
#define MLX5_ESW_VPORT_TABLE_SIZE 128

/* This struct is used as a key to the hash table and we need it to be packed
 * so hash result is consistent
 */
struct mlx5_vport_key {
        u32 chain;      /* TC chain the rule belongs to */
        u16 prio;       /* priority within the chain */
        u16 vport;      /* vport number of the rule's in_rep */
        u16 vhca_id;    /* VHCA id of the owning device */
} __packed;

/* Refcounted per-vport FDB table; hashed by mlx5_vport_key into
 * esw->fdb_table.offloads.vports.table.
 */
struct mlx5_vport_table {
        struct hlist_node hlist;        /* hash-table linkage */
        struct mlx5_flow_table *fdb;    /* the HW flow table itself */
        u32 num_rules;                  /* rules currently referencing it */
        struct mlx5_vport_key key;
};

/* Number of autogrouped flow groups in a per-vport table. */
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4
96e32687
EC
78static struct mlx5_flow_table *
79esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
80{
81 struct mlx5_flow_table_attr ft_attr = {};
82 struct mlx5_flow_table *fdb;
83
87dac697 84 ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
96e32687
EC
85 ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
86 ft_attr.prio = FDB_PER_VPORT;
87 fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
88 if (IS_ERR(fdb)) {
89 esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
90 PTR_ERR(fdb));
91 }
92
93 return fdb;
94}
95
96static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
97 struct mlx5_esw_flow_attr *attr,
98 struct mlx5_vport_key *key)
99{
100 key->vport = attr->in_rep->vport;
101 key->chain = attr->chain;
102 key->prio = attr->prio;
103 key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
104 return jhash(key, sizeof(*key), 0);
105}
106
/* caller must hold vports.lock */
static struct mlx5_vport_table *
esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
{
        struct mlx5_vport_table *e;

        /* 'key' is the jhash of 'skey'; compare the full key to resolve
         * hash-bucket collisions.
         */
        hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
                if (!memcmp(&e->key, skey, sizeof(*skey)))
                        return e;

        return NULL;
}
119
/* Drop one rule reference on the per-vport table matching @attr and
 * destroy the table when the last reference goes away.  Safe to call
 * for a key with no entry (no-op).
 */
static void
esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_vport_table *e;
        struct mlx5_vport_key key;
        u32 hkey;

        mutex_lock(&esw->fdb_table.offloads.vports.lock);
        hkey = flow_attr_to_vport_key(esw, attr, &key);
        e = esw_vport_tbl_lookup(esw, &key, hkey);
        /* nothing to do if no entry exists or other rules still use it */
        if (!e || --e->num_rules)
                goto out;

        hash_del(&e->hlist);
        mlx5_destroy_flow_table(e->fdb);
        kfree(e);
out:
        mutex_unlock(&esw->fdb_table.offloads.vports.lock);
}
139
/* Look up (or create) the per-vport FDB table for @attr, taking one
 * rule reference on it.  Returns the flow table or an ERR_PTR.
 */
static struct mlx5_flow_table *
esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *ns;
        struct mlx5_flow_table *fdb;
        struct mlx5_vport_table *e;
        struct mlx5_vport_key skey;
        u32 hkey;

        mutex_lock(&esw->fdb_table.offloads.vports.lock);
        hkey = flow_attr_to_vport_key(esw, attr, &skey);
        e = esw_vport_tbl_lookup(esw, &skey, hkey);
        if (e) {
                /* table already exists; just take another reference */
                e->num_rules++;
                goto out;
        }

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e) {
                fdb = ERR_PTR(-ENOMEM);
                goto err_alloc;
        }

        ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!ns) {
                esw_warn(dev, "Failed to get FDB namespace\n");
                fdb = ERR_PTR(-ENOENT);
                goto err_ns;
        }

        fdb = esw_vport_tbl_create(esw, ns);
        if (IS_ERR(fdb))
                goto err_ns;

        e->fdb = fdb;
        e->num_rules = 1;
        e->key = skey;
        hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
out:
        mutex_unlock(&esw->fdb_table.offloads.vports.lock);
        return e->fdb;

err_ns:
        kfree(e);
err_alloc:
        mutex_unlock(&esw->fdb_table.offloads.vports.lock);
        return fdb;
}
189
/* Pre-create (and take a reference on) a prio-1 per-vport table for
 * every vport on the eswitch.  On failure all references taken so far
 * are released via mlx5_esw_vport_tbl_put() — esw_vport_tbl_put() is a
 * no-op for vports that were never got.
 */
int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
        struct mlx5_esw_flow_attr attr = {};
        struct mlx5_eswitch_rep rep = {};
        struct mlx5_flow_table *fdb;
        struct mlx5_vport *vport;
        int i;

        attr.prio = 1;
        attr.in_rep = &rep;
        mlx5_esw_for_all_vports(esw, i, vport) {
                attr.in_rep->vport = vport->vport;
                fdb = esw_vport_tbl_get(esw, &attr);
                if (IS_ERR(fdb))
                        goto out;
        }
        return 0;

out:
        mlx5_esw_vport_tbl_put(esw);
        return PTR_ERR(fdb);
}
212
213void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
214{
215 struct mlx5_esw_flow_attr attr = {};
216 struct mlx5_eswitch_rep rep = {};
217 struct mlx5_vport *vport;
218 int i;
219
220 attr.prio = 1;
221 attr.in_rep = &rep;
222 mlx5_esw_for_all_vports(esw, i, vport) {
223 attr.in_rep->vport = vport->vport;
224 esw_vport_tbl_put(esw, &attr);
225 }
226}
227
228/* End: Per vport tables */
229
879c8f84
BW
230static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
231 u16 vport_num)
232{
02f3afd9 233 int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
879c8f84
BW
234
235 WARN_ON(idx > esw->total_vports - 1);
236 return &esw->offloads.vport_reps[idx];
237}
238
b7826076 239
/* Program the rule's source-port match into @spec, either as reg_c_0
 * metadata or as a plain source vport match.
 */
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
                                  struct mlx5_flow_spec *spec,
                                  struct mlx5_esw_flow_attr *attr)
{
        void *misc2;
        void *misc;

        /* Use metadata matching because vport is not represented by single
         * VHCA in dual-port RoCE mode, and matching on source vport may fail.
         */
        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
                                                                   attr->in_rep->vport));

                misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_mask());

                spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
                /* keep MISC_PARAMETERS enabled if some other misc field is
                 * already part of the match criteria
                 */
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
                if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
                        spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
        } else {
                /* legacy: match on source vport number, plus the owner VHCA
                 * id when the eswitches are merged
                 */
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
                MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

                if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                        MLX5_SET(fte_match_set_misc, misc,
                                 source_eswitch_owner_vhca_id,
                                 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
                MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
                if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                        MLX5_SET_TO_ONES(fte_match_set_misc, misc,
                                         source_eswitch_owner_vhca_id);

                spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
        }

        /* mark uplink-originated traffic in the flow context when supported */
        if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
            attr->in_rep->vport == MLX5_VPORT_UPLINK)
                spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}
287
/* Build and install an offloaded FDB rule described by @attr.
 *
 * Collects up to MLX5_MAX_FLOW_FWD_VPORTS forward destinations plus an
 * optional counter destination, resolves the target FDB table (per-vport
 * split table, chain/prio table, or attr->fdb), and adds the rule —
 * through a termination table when required.  On success the eswitch
 * flow counter is incremented; on failure all table references taken
 * here are released.  Returns the rule handle or an ERR_PTR.
 */
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_spec *spec,
                                struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
        struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
        bool split = !!(attr->split_count);
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_table *fdb;
        int j, i = 0;

        if (esw->mode != MLX5_ESWITCH_OFFLOADS)
                return ERR_PTR(-EOPNOTSUPP);

        flow_act.action = attr->action;
        /* if per flow vlan pop/push is emulated, don't set that into the firmware */
        if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
                flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
                                     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
        else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
                flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
                flow_act.vlan[0].vid = attr->vlan_vid[0];
                flow_act.vlan[0].prio = attr->vlan_prio[0];
                if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
                        flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
                        flow_act.vlan[1].vid = attr->vlan_vid[1];
                        flow_act.vlan[1].prio = attr->vlan_prio[1];
                }
        }

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                struct mlx5_flow_table *ft;

                if (attr->dest_ft) {
                        /* caller supplied an explicit destination table */
                        flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
                        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                        dest[i].ft = attr->dest_ft;
                        i++;
                } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
                        flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
                        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                        dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
                        i++;
                } else if (attr->dest_chain) {
                        /* goto-chain action: jump to the chain's table */
                        flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
                        ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
                                                       1, 0);
                        if (IS_ERR(ft)) {
                                rule = ERR_CAST(ft);
                                goto err_create_goto_table;
                        }

                        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                        dest[i].ft = ft;
                        i++;
                } else {
                        /* forward to the non-split vport destinations */
                        for (j = attr->split_count; j < attr->out_count; j++) {
                                dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
                                dest[i].vport.num = attr->dests[j].rep->vport;
                                dest[i].vport.vhca_id =
                                        MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
                                if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                                        dest[i].vport.flags |=
                                                MLX5_FLOW_DEST_VPORT_VHCA_ID;
                                if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
                                        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
                                        flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
                                        dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
                                        dest[i].vport.pkt_reformat =
                                                attr->dests[j].pkt_reformat;
                                }
                                i++;
                        }
                }
        }

        if (attr->decap_pkt_reformat)
                flow_act.pkt_reformat = attr->decap_pkt_reformat;

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest[i].counter_id = mlx5_fc_id(attr->counter);
                i++;
        }

        if (attr->outer_match_level != MLX5_MATCH_NONE)
                spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
        if (attr->inner_match_level != MLX5_MATCH_NONE)
                spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                flow_act.modify_hdr = attr->modify_hdr;

        if (split) {
                fdb = esw_vport_tbl_get(esw, attr);
        } else {
                if (attr->chain || attr->prio)
                        fdb = mlx5_esw_chains_get_table(esw, attr->chain,
                                                        attr->prio, 0);
                else
                        fdb = attr->fdb;

                if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
                        mlx5_eswitch_set_rule_source_port(esw, spec, attr);
        }
        if (IS_ERR(fdb)) {
                rule = ERR_CAST(fdb);
                goto err_esw_get;
        }

        if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
                rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
                                                     &flow_act, dest, i);
        else
                rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
        if (IS_ERR(rule))
                goto err_add_rule;
        else
                atomic64_inc(&esw->offloads.num_flows);

        return rule;

err_add_rule:
        if (split)
                esw_vport_tbl_put(esw, attr);
        else if (attr->chain || attr->prio)
                mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_esw_get:
        if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
                mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
        return rule;
}
422
e4ad91f2
CM
423struct mlx5_flow_handle *
424mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
425 struct mlx5_flow_spec *spec,
426 struct mlx5_esw_flow_attr *attr)
427{
428 struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
42f7ad67 429 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
e52c2802
PB
430 struct mlx5_flow_table *fast_fdb;
431 struct mlx5_flow_table *fwd_fdb;
e4ad91f2 432 struct mlx5_flow_handle *rule;
e4ad91f2
CM
433 int i;
434
39ac237c 435 fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
e52c2802
PB
436 if (IS_ERR(fast_fdb)) {
437 rule = ERR_CAST(fast_fdb);
438 goto err_get_fast;
439 }
440
96e32687 441 fwd_fdb = esw_vport_tbl_get(esw, attr);
e52c2802
PB
442 if (IS_ERR(fwd_fdb)) {
443 rule = ERR_CAST(fwd_fdb);
444 goto err_get_fwd;
445 }
446
e4ad91f2 447 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
e85e02ba 448 for (i = 0; i < attr->split_count; i++) {
e4ad91f2 449 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
df65a573 450 dest[i].vport.num = attr->dests[i].rep->vport;
e4ad91f2 451 dest[i].vport.vhca_id =
df65a573 452 MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
aa39c2c0
EB
453 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
454 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
1cc26d74
EB
455 if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
456 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
2b688ea5 457 dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
1cc26d74 458 }
e4ad91f2
CM
459 }
460 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
e52c2802 461 dest[i].ft = fwd_fdb,
e4ad91f2
CM
462 i++;
463
c01cfd0f 464 mlx5_eswitch_set_rule_source_port(esw, spec, attr);
e4ad91f2 465
93b3586e 466 if (attr->outer_match_level != MLX5_MATCH_NONE)
c01cfd0f 467 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
e4ad91f2 468
278d51f2 469 flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
e52c2802 470 rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
e4ad91f2 471
e52c2802
PB
472 if (IS_ERR(rule))
473 goto add_err;
e4ad91f2 474
525e84be 475 atomic64_inc(&esw->offloads.num_flows);
e52c2802
PB
476
477 return rule;
478add_err:
96e32687 479 esw_vport_tbl_put(esw, attr);
e52c2802 480err_get_fwd:
39ac237c 481 mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
e52c2802 482err_get_fast:
e4ad91f2
CM
483 return rule;
484}
485
/* Common teardown for offloaded and fwd rules: delete the HW rule,
 * release any termination tables, and drop the table references that
 * were taken at add time.
 */
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
                        struct mlx5_flow_handle *rule,
                        struct mlx5_esw_flow_attr *attr,
                        bool fwd_rule)
{
        bool split = (attr->split_count > 0);
        int i;

        mlx5_del_flow_rules(rule);

        if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
                /* unref the term table */
                for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
                        if (attr->dests[i].termtbl)
                                mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
                }
        }

        atomic64_dec(&esw->offloads.num_flows);

        if (fwd_rule) {
                /* fwd rules hold both a per-vport table and a chain table */
                esw_vport_tbl_put(esw, attr);
                mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
        } else {
                if (split)
                        esw_vport_tbl_put(esw, attr);
                else if (attr->chain || attr->prio)
                        mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
                                                  0);
                if (attr->dest_chain)
                        mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
        }
}
520
/* Delete a rule created by mlx5_eswitch_add_offloaded_rule(). */
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_handle *rule,
                                struct mlx5_esw_flow_attr *attr)
{
        __mlx5_eswitch_del_rule(esw, rule, attr, false);
}
528
/* Delete a rule created by mlx5_eswitch_add_fwd_rule(). */
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
                          struct mlx5_flow_handle *rule,
                          struct mlx5_esw_flow_attr *attr)
{
        __mlx5_eswitch_del_rule(esw, rule, attr, true);
}
536
f5f82476
OG
537static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
538{
539 struct mlx5_eswitch_rep *rep;
411ec9e0 540 int i, err = 0;
f5f82476
OG
541
542 esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
411ec9e0 543 mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
8693115a 544 if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
f5f82476
OG
545 continue;
546
547 err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
548 if (err)
549 goto out;
550 }
551
552out:
553 return err;
554}
555
556static struct mlx5_eswitch_rep *
557esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
558{
559 struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
560
561 in_rep = attr->in_rep;
df65a573 562 out_rep = attr->dests[0].rep;
f5f82476
OG
563
564 if (push)
565 vport = in_rep;
566 else if (pop)
567 vport = out_rep;
568 else
569 vport = in_rep;
570
571 return vport;
572}
573
/* Validate that an emulated vlan push/pop can be offloaded for this
 * rule.  Returns 0 when supported, -EOPNOTSUPP otherwise.
 */
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
                                     bool push, bool pop, bool fwd)
{
        struct mlx5_eswitch_rep *in_rep, *out_rep;

        /* push/pop emulation only applies to forwarding rules */
        if ((push || pop) && !fwd)
                goto out_notsupp;

        in_rep = attr->in_rep;
        out_rep = attr->dests[0].rep;

        if (push && in_rep->vport == MLX5_VPORT_UPLINK)
                goto out_notsupp;

        if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
                goto out_notsupp;

        /* vport has vlan push configured, can't offload VF --> wire rules w.o it */
        if (!push && !pop && fwd)
                if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
                        goto out_notsupp;

        /* protects against (1) setting rules with different vlans to push and
         * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
         */
        if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
                goto out_notsupp;

        return 0;

out_notsupp:
        return -EOPNOTSUPP;
}
607
/* Perform the emulated vlan push/pop bookkeeping for a new offloaded
 * rule.  Takes esw->state_lock; on success marks the attr with
 * MLX5_ESW_ATTR_FLAG_VLAN_HANDLED so the del path knows to undo it.
 */
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr)
{
        struct offloads_fdb *offloads = &esw->fdb_table.offloads;
        struct mlx5_eswitch_rep *vport = NULL;
        bool push, pop, fwd;
        int err = 0;

        /* nop if we're on the vlan push/pop non emulation mode */
        if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
                return 0;

        push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
        pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
        /* a goto-chain rule is not a terminal forward for vlan purposes */
        fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
                 !attr->dest_chain);

        mutex_lock(&esw->state_lock);

        err = esw_add_vlan_action_check(attr, push, pop, fwd);
        if (err)
                goto unlock;

        attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;

        vport = esw_vlan_action_get_vport(attr, push, pop);

        if (!push && !pop && fwd) {
                /* tracks VF --> wire rules without vlan push action */
                if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
                        vport->vlan_refcount++;
                        attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
                }

                goto unlock;
        }

        if (!push && !pop)
                goto unlock;

        if (!(offloads->vlan_push_pop_refcount)) {
                /* it's the 1st vlan rule, apply global vlan pop policy */
                err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
                if (err)
                        goto out;
        }
        offloads->vlan_push_pop_refcount++;

        if (push) {
                /* vlan already configured on this vport; just refcount it */
                if (vport->vlan_refcount)
                        goto skip_set_push;

                err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
                                                    SET_VLAN_INSERT | SET_VLAN_STRIP);
                if (err)
                        goto out;
                vport->vlan = attr->vlan_vid[0];
skip_set_push:
                vport->vlan_refcount++;
        }
out:
        if (!err)
                attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
        mutex_unlock(&esw->state_lock);
        return err;
}
675
/* Undo the bookkeeping done by mlx5_eswitch_add_vlan_action() when a
 * rule is removed; restores the global vlan pop policy once the last
 * push/pop rule is gone.
 */
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr)
{
        struct offloads_fdb *offloads = &esw->fdb_table.offloads;
        struct mlx5_eswitch_rep *vport = NULL;
        bool push, pop, fwd;
        int err = 0;

        /* nop if we're on the vlan push/pop non emulation mode */
        if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
                return 0;

        /* only undo what the add path actually accounted for */
        if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
                return 0;

        push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
        pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
        fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

        mutex_lock(&esw->state_lock);

        vport = esw_vlan_action_get_vport(attr, push, pop);

        if (!push && !pop && fwd) {
                /* tracks VF --> wire rules without vlan push action */
                if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
                        vport->vlan_refcount--;

                goto out;
        }

        if (push) {
                vport->vlan_refcount--;
                if (vport->vlan_refcount)
                        goto skip_unset_push;

                vport->vlan = 0;
                err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
                                                    0, 0, SET_VLAN_STRIP);
                if (err)
                        goto out;
        }

skip_unset_push:
        offloads->vlan_push_pop_refcount--;
        if (offloads->vlan_push_pop_refcount)
                goto out;

        /* no more vlan rules, stop global vlan pop policy */
        err = esw_set_global_vlan_pop(esw, 0);

out:
        mutex_unlock(&esw->state_lock);
        return err;
}
731
/* Install a slow-path FDB rule steering traffic from send queue @sqn
 * (originating at the eswitch manager vport) to representor @vport.
 * Returns the rule handle or an ERR_PTR.
 */
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
                                    u32 sqn)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;
        void *misc;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                flow_rule = ERR_PTR(-ENOMEM);
                goto out;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
        /* source vport is the esw manager */
        MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport.num = vport;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
                                        spec, &flow_act, &dest, 1);
        if (IS_ERR(flow_rule))
                esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
        kvfree(spec);
        return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
ab22be9b 771
/* Remove a rule created by mlx5_eswitch_add_send_to_vport_rule(). */
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
        mlx5_del_flow_rules(rule);
}
776
5b7cb745
PB
777static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
778{
779 return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
780 MLX5_FDB_TO_VPORT_REG_C_1;
781}
782
/* Enable/disable FDB-to-vport-context copying of reg_c_0 (source vport
 * metadata) and, when supported, reg_c_1; tracks the reg_c_1 loopback
 * state in esw->flags.
 */
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
        u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
        u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
        u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
        u8 curr, wanted;
        int err;

        if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
            !mlx5_eswitch_vport_match_metadata_enabled(esw))
                return 0;

        /* read-modify-write: fetch the current reg_c copy mask first */
        MLX5_SET(query_esw_vport_context_in, in, opcode,
                 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
        err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
        if (err)
                return err;

        curr = MLX5_GET(query_esw_vport_context_out, out,
                        esw_vport_context.fdb_to_vport_reg_c_id);
        wanted = MLX5_FDB_TO_VPORT_REG_C_0;
        if (mlx5_eswitch_reg_c1_loopback_supported(esw))
                wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

        if (enable)
                curr |= wanted;
        else
                curr &= ~wanted;

        MLX5_SET(modify_esw_vport_context_in, min,
                 esw_vport_context.fdb_to_vport_reg_c_id, curr);
        MLX5_SET(modify_esw_vport_context_in, min,
                 field_select.fdb_to_vport_reg_c_id, 1);

        err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
        if (!err) {
                /* mirror the resulting reg_c_1 state into the esw flags */
                if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
                        esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
                else
                        esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
        }

        return err;
}
827
/* Build the match criteria and destination shared by all peer miss
 * rules: traffic sourced at the peer eswitch is forwarded to the peer's
 * manager vport.
 */
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
                                  struct mlx5_core_dev *peer_dev,
                                  struct mlx5_flow_spec *spec,
                                  struct mlx5_flow_destination *dest)
{
        void *misc;

        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                /* criteria only; the per-vport metadata value is filled in
                 * later by esw_set_peer_miss_rule_source_port()
                 */
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters_2);
                MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_mask());

                spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
        } else {
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);

                /* match the peer's VHCA id; source_port is set per rule */
                MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
                         MLX5_CAP_GEN(peer_dev, vhca_id));

                spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters);
                MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
                MLX5_SET_TO_ONES(fte_match_set_misc, misc,
                                 source_eswitch_owner_vhca_id);
        }

        dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest->vport.num = peer_dev->priv.eswitch->manager_vport;
        dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
        dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}
863
a5641cb5
JL
864static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
865 struct mlx5_eswitch *peer_esw,
866 struct mlx5_flow_spec *spec,
867 u16 vport)
868{
869 void *misc;
870
871 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
872 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
873 misc_parameters_2);
874 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
875 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
876 vport));
877 } else {
878 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
879 misc_parameters);
880 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
881 }
882}
883
/* Install "peer miss" rules in the slow FDB: traffic whose source is a
 * vport of @peer_dev's eswitch (PF, ECPF when present, and each VF) is
 * forwarded to the peer's manager vport.
 */
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
                                       struct mlx5_core_dev *peer_dev)
{
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_handle **flows;
        struct mlx5_flow_handle *flow;
        struct mlx5_flow_spec *spec;
        /* total vports is the same for both e-switches */
        int nvports = esw->total_vports;
        void *misc;
        int err, i;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        peer_miss_rules_setup(esw, peer_dev, spec, &dest);

        flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
        if (!flows) {
                err = -ENOMEM;
                goto alloc_flows_err;
        }

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                            misc_parameters);

        if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
                esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
                                                   spec, MLX5_VPORT_PF);

                flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
                                           spec, &flow_act, &dest, 1);
                if (IS_ERR(flow)) {
                        err = PTR_ERR(flow);
                        goto add_pf_flow_err;
                }
                flows[MLX5_VPORT_PF] = flow;
        }

        if (mlx5_ecpf_vport_exists(esw->dev)) {
                /* NOTE(review): ECPF sets source_port directly rather than
                 * going through esw_set_peer_miss_rule_source_port() —
                 * confirm this is intended in metadata-match mode.
                 */
                MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
                flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
                                           spec, &flow_act, &dest, 1);
                if (IS_ERR(flow)) {
                        err = PTR_ERR(flow);
                        goto add_ecpf_flow_err;
                }
                flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
        }

        mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
                esw_set_peer_miss_rule_source_port(esw,
                                                   peer_dev->priv.eswitch,
                                                   spec, i);

                flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
                                           spec, &flow_act, &dest, 1);
                if (IS_ERR(flow)) {
                        err = PTR_ERR(flow);
                        goto add_vf_flow_err;
                }
                flows[i] = flow;
        }

        esw->fdb_table.offloads.peer_miss_rules = flows;

        kvfree(spec);
        return 0;

add_vf_flow_err:
        /* unwind only the VF rules added before the failing index */
        nvports = --i;
        mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
                mlx5_del_flow_rules(flows[i]);

        if (mlx5_ecpf_vport_exists(esw->dev))
                mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
        if (mlx5_core_is_ecpf_esw_manager(esw->dev))
                mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
        esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
        kvfree(flows);
alloc_flows_err:
        kvfree(spec);
        return err;
}
973
/* Remove all peer miss rules installed by esw_add_fdb_peer_miss_rules()
 * and free the rule-handle array. Deletion mirrors creation order in
 * reverse: VFs first (reverse), then ECPF, then PF.
 */
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}
993
3aa33572
OG
/* Install the two slow-path FDB miss rules that forward unmatched traffic
 * to the e-switch manager vport:
 *  - unicast miss:   matches dmac bit 0x01 clear (criteria only sets the bit
 *    in match_criteria, value left 0)
 *  - multicast miss: same criteria with dmac bit 0x01 set in match_value
 * Stores the handles in fdb_table.offloads.miss_rule_uni / miss_rule_multi.
 * Returns 0 or a negative errno; the unicast rule is removed if the
 * multicast rule fails.
 */
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;	/* match on the multicast bit of the dmac */

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	/* second rule: same criteria, multicast bit set in the value */
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}
1053
11b717d6
PB
/* Add a rule in the restore table that matches the given chain tag in
 * metadata reg_c_0, copies reg_c_1 to reg_b (via the pre-allocated
 * restore_copy_hdr_id modify header), tags the flow context with the tag,
 * and forwards to the offloads table. Returns the rule handle or ERR_PTR.
 * Requires reg_c1 loopback support; otherwise returns -EOPNOTSUPP.
 */
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	/* restore the chain tag into the flow context so SW can read it */
	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}
1100
1101u32
1102esw_get_max_restore_tag(struct mlx5_eswitch *esw)
1103{
1104 return ESW_CHAIN_TAG_METADATA_MASK;
1105}
1106
1967ce6e 1107#define MAX_PF_SQ 256
cd3d07e7 1108#define MAX_SQ_NVPORTS 32
1967ce6e 1109
a5641cb5
JL
1110static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
1111 u32 *flow_group_in)
1112{
1113 void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1114 flow_group_in,
1115 match_criteria);
1116
1117 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1118 MLX5_SET(create_flow_group_in, flow_group_in,
1119 match_criteria_enable,
1120 MLX5_MATCH_MISC_PARAMETERS_2);
1121
0f0d3827
PB
1122 MLX5_SET(fte_match_param, match_criteria,
1123 misc_parameters_2.metadata_reg_c_0,
1124 mlx5_eswitch_get_vport_metadata_mask());
a5641cb5
JL
1125 } else {
1126 MLX5_SET(create_flow_group_in, flow_group_in,
1127 match_criteria_enable,
1128 MLX5_MATCH_MISC_PARAMETERS);
1129
1130 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1131 misc_parameters.source_port);
1132 }
1133}
1134
/* Create the offloads-mode slow-path FDB table and its flow groups:
 *  1. send-to-vport group (matches source_sqn + source_port) sized for
 *     MAX_SQ_NVPORTS SQs per vport plus MAX_PF_SQ,
 *  2. peer e-switch miss group (one entry per vport),
 *  3. miss group (multicast-bit dmac match) plus the miss rules.
 * Also creates the FDB chains infrastructure. On any failure everything
 * created so far is torn down in reverse via the goto ladder.
 * Returns 0 or a negative errno.
 */
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	u32 flags = 0, *flow_group_in;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	/* room for SQ rules, peer-miss rules (one per vport) and miss rules */
	table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	err = mlx5_esw_chains_create(esw);
	if (err) {
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		/* without metadata, also match the owning e-switch's vhca id */
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;	/* criteria covers only the dmac multicast bit */

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_esw_chains_destroy(esw);
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}
1293
/* Tear down everything esw_create_offloads_fdb_tables() built, in reverse
 * creation order. A NULL slow_fdb means the tables were never created
 * (or already destroyed), so this is safe to call unconditionally.
 */
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_esw_chains_destroy(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
}
c116c6ee 1312
/* Create the offloads (vport RX) flow table in the OFFLOADS namespace,
 * sized for one entry per vport plus the reserved miss-flow entries.
 * The table handle is stored in esw->offloads.ft_offloads.
 * Returns 0 or a negative errno.
 */
static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
	ft_attr.prio = 1;	/* prio 0 is taken by the restore table */

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}
1340
1341static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
1342{
1343 struct mlx5_esw_offload *offloads = &esw->offloads;
1344
1345 mlx5_destroy_flow_table(offloads->ft_offloads);
1346}
fed9ce22 1347
/* Create the single flow group of the offloads table, matching on the
 * source vport (metadata or source_port, per esw_set_flow_group_source_port),
 * covering nvports entries plus the reserved miss flows.
 * Stores the group in esw->offloads.vport_rx_group. Returns 0 or -errno.
 */
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}
1379
/* Destroy the vport RX flow group created by esw_create_vport_rx_group(). */
static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
1384
/* Add a rule to the offloads table steering traffic sourced from @vport to
 * @dest (typically the representor's TIR). Matches on metadata reg_c_0 when
 * vport match metadata is enabled, otherwise on the misc source_port field.
 * Returns the rule handle, or ERR_PTR on failure.
 */
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}
feae9087 1432
bf3347c4 1433
cc617ced
PP
/* Determine the effective min-inline mode for the e-switch and return it
 * in *mode. For NOT_REQUIRED/L2 capability the answer is immediate; for
 * VPORT_CONTEXT every host function vport is queried and all must agree,
 * otherwise -EINVAL. Returns 0 on success, -EOPNOTSUPP when not a vport
 * group manager or the e-switch is disabled.
 */
static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}
	/* NOTE(review): an unrecognized capability value falls through to
	 * the per-vport query below — presumably intentional; confirm.
	 */

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}
bf3347c4 1470
11b717d6
PB
/* Free the chain-restore resources created by esw_create_restore_table():
 * the copy modify-header, the flow group, and the restore table itself.
 * No-op when reg_c1 loopback is unsupported (nothing was created).
 */
static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}
1482
/* Create the chain-restore infrastructure in the OFFLOADS namespace:
 *  - a table with one entry per possible chain tag,
 *  - a flow group matching the tag bits in metadata reg_c_0,
 *  - a modify header copying reg_c_1 into reg_b (so SW can recover the
 *    chain after a miss).
 * Results are stored in esw->offloads. Returns 0 (also when reg_c1
 * loopback is unsupported, in which case nothing is created) or -errno.
 */
static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_modify_hdr *mod_hdr;
	void *match_criteria, *misc;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		err = -ENOMEM;
		goto out_free;
	}

	/* one flow entry per possible chain tag value */
	ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		esw_warn(esw->dev, "Failed to create restore table, err %d\n",
			 err);
		goto out_free;
	}

	memset(flow_group_in, 0, inlen);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    misc_parameters_2);

	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft_attr.max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create restore flow group, err: %d\n",
			 err);
		goto err_group;
	}

	/* copy reg_c_1 (chain metadata) into reg_b for SW visibility */
	MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, modact, src_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(copy_action_in, modact, dst_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	mod_hdr = mlx5_modify_header_alloc(esw->dev,
					   MLX5_FLOW_NAMESPACE_KERNEL, 1,
					   modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		esw_warn(dev, "Failed to create restore mod header, err: %d\n",
			 err);
		goto err_mod_hdr;
	}

	esw->offloads.ft_offloads_restore = ft;
	esw->offloads.restore_group = g;
	esw->offloads.restore_copy_hdr_id = mod_hdr;

	kvfree(flow_group_in);

	return 0;

err_mod_hdr:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(ft);
out_free:
	kvfree(flow_group_in);

	return err;
}
1574
db7ff19e
EB
/* Switch the e-switch from legacy to offloads mode: disable, then re-enable
 * in MLX5_ESWITCH_OFFLOADS with the current number of VFs. On failure an
 * attempt is made to fall back to legacy mode. Also resolves the effective
 * inline mode if it is still MLX5_INLINE_MODE_NONE. Errors are reported
 * through the devlink extack. Returns 0 or -errno from the enable attempt.
 */
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
					 esw->dev->priv.sriov.num_vfs);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		/* best-effort rollback to legacy mode */
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}
1603
e8d31c4d
MB
/* Free the representor array allocated by esw_offloads_init_reps(). */
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}
1608
/* Allocate and initialize one representor entry per vport: record the
 * vport number and array index, and mark every rep type UNREGISTERED.
 * Returns 0 or -ENOMEM.
 */
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = esw->total_vports;
	struct mlx5_eswitch_rep *rep;
	int vport_index;
	u8 rep_type;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_esw_for_all_reps(esw, vport_index, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
		rep->vport_index = vport_index;

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}
1633
c9b99abc
BW
/* Unload one rep type of a representor: atomically move LOADED ->
 * REGISTERED and, only if it really was LOADED, call the type's unload
 * callback. The cmpxchg makes repeated calls harmless.
 */
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}
1641
/* Unload the given rep type for every vport: VFs in reverse order, then
 * ECPF and PF where they exist, and finally the uplink representor.
 */
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
		__esw_offloads_unload_rep(esw, rep, rep_type);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}
1663
/* Load all registered rep types for one vport's representor. A no-op
 * unless the e-switch is in offloads mode. For each type the state is
 * atomically moved REGISTERED -> LOADED before invoking the load callback;
 * on failure the failing type is reset and already-loaded types are
 * unloaded in reverse. Returns 0 or the callback's error.
 */
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
				   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
			err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
			if (err)
				goto err_reps;
		}

	return 0;

err_reps:
	atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
	for (--rep_type; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}
1690
c2d7712c
BW
/* Unload every rep type of one vport's representor, in reverse type order.
 * A no-op unless the e-switch is in offloads mode.
 */
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}
1703
ac004b83
RD
1704#define ESW_OFFLOADS_DEVCOM_PAIR (0)
1705#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
1706
1707static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
1708 struct mlx5_eswitch *peer_esw)
1709{
1710 int err;
1711
1712 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
1713 if (err)
1714 return err;
1715
1716 return 0;
1717}
1718
/* Undo mlx5_esw_offloads_pair(): flush peer TC flows (when TC offload is
 * compiled in) and remove the peer-miss rules.
 */
static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	mlx5e_tc_clean_fdb_peer_flows(esw);
#endif
	esw_del_fdb_peer_miss_rules(esw);
}
1726
8463daf1
MG
/* Set or clear the FDB root-namespace peering between two e-switches.
 * When pairing, both directions are set; if the second direction fails the
 * first is rolled back. When unpairing, both directions are cleared.
 * Returns 0 or -errno.
 */
static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL);
		mlx5_flow_namespace_set_peer(peer_ns, NULL);
	}

	return 0;
}
1755
ac004b83
RD
/* devcom callback handling PAIR/UNPAIR events between the two e-switches
 * of a multi-port device. PAIR: peer the FDB namespaces, install peer-miss
 * rules in both directions, then mark the devcom component paired (skipped
 * if the two switches disagree on metadata matching). UNPAIR: reverse, only
 * if currently paired. Returns 0 or -errno after rolling back partial work.
 */
static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	struct mlx5_eswitch *peer_esw = event_data;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		/* both sides must use the same source-vport match scheme */
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);
err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}
1807
/* Initialize peer-flow bookkeeping and, on merged-eswitch capable devices,
 * register for devcom ESW_OFFLOADS events and announce ourselves with a
 * PAIR event.
 */
static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}
1827
/* Counterpart of esw_offloads_devcom_init(): send an UNPAIR event and
 * unregister the devcom component. No-op without merged-eswitch support.
 */
static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
1840
92ab1eb3
JL
1841static bool
1842esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
1843{
1844 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
1845 return false;
1846
1847 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
1848 MLX5_FDB_TO_VPORT_REG_C_0))
1849 return false;
1850
1851 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
1852 return false;
1853
1854 if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
1855 mlx5_ecpf_vport_exists(esw->dev))
1856 return false;
1857
1858 return true;
1859}
1860
1e62e222
MD
/* Metadata-based vport matching is mandatory on multi-port (LAG/MP)
 * devices, where source_port alone is ambiguous.
 */
static bool
esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
{
	return mlx5_core_mp_enabled(esw->dev);
}
1866
/* Use vport match metadata only when it is both required and supported. */
static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
{
	return esw_check_vport_match_metadata_mandatory(esw) &&
	       esw_check_vport_match_metadata_supported(esw);
}
1872
133dcfc5
VP
/* Allocate a unique per-vport match-metadata value from this device's IDA.
 * The value is composed of the (trimmed) vhca_id in the high bits and a
 * per-vport index in the low ESW_VPORT_BITS; zero is reserved as invalid.
 * Returns the metadata value, or 0 on allocation failure.
 */
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
	u32 num_vports = GENMASK(ESW_VPORT_BITS - 1, 0) - 1;
	u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
	u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	u32 start;
	u32 end;
	int id;

	/* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */
	WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));

	/* Trim vhca_id to ESW_VHCA_ID_BITS */
	vhca_id &= vhca_id_mask;

	start = (vhca_id << ESW_VPORT_BITS);
	end = start + num_vports;
	if (!vhca_id)
		start += 1; /* zero is reserved/invalid metadata */
	id = ida_alloc_range(&esw->offloads.vport_metadata_ida, start, end, GFP_KERNEL);

	return (id < 0) ? 0 : id;
}
1896
/* Return a metadata value obtained from mlx5_esw_match_metadata_alloc()
 * to the IDA.
 */
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
{
	ida_free(&esw->offloads.vport_metadata_ida, metadata);
}
1901
/* Assign a default match-metadata value to a vport. The uplink vport
 * intentionally gets none. Returns -ENOSPC when the metadata space is
 * exhausted (alloc returned the reserved value 0).
 */
static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	if (vport->vport == MLX5_VPORT_UPLINK)
		return 0;

	vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
	vport->metadata = vport->default_metadata;
	return vport->metadata ? 0 : -ENOSPC;
}
1912
/* Release a vport's default metadata. Warns if the active metadata was
 * changed from the default without being restored first.
 */
static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport)
{
	if (vport->vport == MLX5_VPORT_UPLINK || !vport->default_metadata)
		return;

	WARN_ON(vport->metadata != vport->default_metadata);
	mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}
1922
/* Set up per-vport offloads ACLs: metadata first, then ingress ACL,
 * and (for VF vports only) the egress ACL. On failure, unwind in
 * reverse order via the goto ladder.
 */
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_offloads_vport_metadata_setup(esw, vport);
	if (err)
		goto metadata_err;

	err = esw_acl_ingress_ofld_setup(esw, vport);
	if (err)
		goto ingress_err;

	if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
		err = esw_acl_egress_ofld_setup(esw, vport);
		if (err)
			goto egress_err;
	}

	return 0;

egress_err:
	esw_acl_ingress_ofld_cleanup(esw, vport);
ingress_err:
	esw_offloads_vport_metadata_cleanup(esw, vport);
metadata_err:
	return err;
}
18486737 1952
/* Destroy per-vport offloads ACLs in reverse order of creation:
 * egress, ingress, then metadata.
 */
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_acl_egress_ofld_cleanup(vport);
	esw_acl_ingress_ofld_cleanup(esw, vport);
	esw_offloads_vport_metadata_cleanup(esw, vport);
}
7445cfb1 1961
/* Create ACL tables for the uplink vport. Decides here whether the
 * eswitch will run in vport-match-metadata mode; the flag is cleared
 * again if table creation fails.
 */
static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err;

	if (esw_use_vport_metadata(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	err = esw_vport_create_offloads_acl_tables(esw, vport);
	if (err)
		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
	return err;
}
1976
/* Destroy the uplink vport ACL tables and leave metadata-match mode. */
static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	esw_vport_destroy_offloads_acl_tables(esw, vport);
	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}
1985
/* Build all offloads-mode steering objects in dependency order:
 * uplink ACLs -> offloads table -> restore table -> FDB tables ->
 * vport RX group. Each failure unwinds everything created so far
 * (labels are named after the step that failed).
 */
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	int total_vports = esw->total_vports;
	int err;

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
	mutex_init(&esw->fdb_table.offloads.vports.lock);
	hash_init(esw->fdb_table.offloads.vports.table);

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
		goto create_acl_err;

	err = esw_create_offloads_table(esw, total_vports);
	if (err)
		goto create_offloads_err;

	err = esw_create_restore_table(esw);
	if (err)
		goto create_restore_err;

	err = esw_create_offloads_fdb_tables(esw);
	if (err)
		goto create_fdb_err;

	err = esw_create_vport_rx_group(esw, total_vports);
	if (err)
		goto create_fg_err;

	return 0;

create_fg_err:
	esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
	esw_destroy_restore_table(esw);
create_restore_err:
	esw_destroy_offloads_table(esw);
create_offloads_err:
	esw_destroy_uplink_offloads_acl_tables(esw);
create_acl_err:
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
	return err;
}
2029
/* Destroy all steering objects created by esw_offloads_steering_init(),
 * in reverse creation order.
 */
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_restore_table(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_uplink_offloads_acl_tables(esw);
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
2039
/* React to a change in the number of host VFs reported by FW.
 * @out: QUERY_ESW_FUNCTIONS command output buffer.
 * Loads or unloads VF vports accordingly; esw_funcs.num_vfs is only
 * updated once the (un)load succeeded. Ignored when the host PF is
 * disabled or the count did not actually change.
 */
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	bool host_pf_disabled;
	u16 new_num_vfs;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	} else {
		int err;

		err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
						  MLX5_VPORT_UC_ADDR_CHANGE);
		if (err)
			return;
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
}
2067
/* Workqueue callback for the ESW_FUNCTIONS_CHANGED event: query the
 * current function state from FW and apply any VF count change.
 * Frees the work item allocated by mlx5_esw_funcs_changed_handler().
 */
static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}
2086
/* Notifier callback (may run in atomic context, hence GFP_ATOMIC):
 * defer the actual FW query and vport reload to the eswitch workqueue.
 */
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}
2107
/* Switch the eswitch into offloads mode: pick the encap default from
 * FDB reformat/decap caps, enable RoCE/metadata passing, build steering,
 * force VF links down (reps control link state), load the uplink rep
 * first, then the PF/VF vports, and finally start devcom pairing.
 * Errors unwind in reverse order through the goto ladder.
 */
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err, i;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	mutex_init(&esw->offloads.termtbl_mutex);
	mlx5_rdma_enable_roce(esw->dev);

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	/* Uplink vport rep must load first. */
	err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
	if (err)
		goto err_uplink;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	esw_offloads_devcom_init(esw);

	return 0;

err_vports:
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	return err;
}
2158
/* Transition from offloads back to legacy mode (mode_lock held by
 * caller). If enabling legacy fails, attempt to roll back to offloads;
 * the original error is still returned.
 */
static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
					 MLX5_ESWITCH_IGNORE_NUM_VFS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}
2179
/* Undo esw_offloads_enable() in reverse order and reset the encap mode
 * to NONE.
 */
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_devcom_cleanup(esw);
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
2191
ef78618b 2192static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
c930a3ad
OG
2193{
2194 switch (mode) {
2195 case DEVLINK_ESWITCH_MODE_LEGACY:
f6455de0 2196 *mlx5_mode = MLX5_ESWITCH_LEGACY;
c930a3ad
OG
2197 break;
2198 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
f6455de0 2199 *mlx5_mode = MLX5_ESWITCH_OFFLOADS;
c930a3ad
OG
2200 break;
2201 default:
2202 return -EINVAL;
2203 }
2204
2205 return 0;
2206}
2207
ef78618b
OG
2208static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
2209{
2210 switch (mlx5_mode) {
f6455de0 2211 case MLX5_ESWITCH_LEGACY:
ef78618b
OG
2212 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
2213 break;
f6455de0 2214 case MLX5_ESWITCH_OFFLOADS:
ef78618b
OG
2215 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
2216 break;
2217 default:
2218 return -EINVAL;
2219 }
2220
2221 return 0;
2222}
2223
bffaa916
RD
2224static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
2225{
2226 switch (mode) {
2227 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
2228 *mlx5_mode = MLX5_INLINE_MODE_NONE;
2229 break;
2230 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
2231 *mlx5_mode = MLX5_INLINE_MODE_L2;
2232 break;
2233 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
2234 *mlx5_mode = MLX5_INLINE_MODE_IP;
2235 break;
2236 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
2237 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
2238 break;
2239 default:
2240 return -EINVAL;
2241 }
2242
2243 return 0;
2244}
2245
2246static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
2247{
2248 switch (mlx5_mode) {
2249 case MLX5_INLINE_MODE_NONE:
2250 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
2251 break;
2252 case MLX5_INLINE_MODE_L2:
2253 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
2254 break;
2255 case MLX5_INLINE_MODE_IP:
2256 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
2257 break;
2258 case MLX5_INLINE_MODE_TCP_UDP:
2259 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
2260 break;
2261 default:
2262 return -EINVAL;
2263 }
2264
2265 return 0;
2266}
2267
ae24432c
PP
static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
{
	/* devlink commands in NONE eswitch mode are currently supported only
	 * on ECPF.
	 */
	return (esw->mode == MLX5_ESWITCH_NONE &&
		!mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
}
2276
/* devlink op: change the eswitch mode (legacy <-> switchdev).
 * Validates the requested mode before taking mode_lock; the actual
 * transition happens via esw_offloads_start()/esw_offloads_stop()
 * while the lock is held.
 */
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	u16 cur_mlx5_mode, mlx5_mode = 0;
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	mutex_lock(&esw->mode_lock);
	cur_mlx5_mode = esw->mode;
	if (cur_mlx5_mode == mlx5_mode)
		goto unlock;	/* already in the requested mode */

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		err = esw_offloads_start(esw, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		err = esw_offloads_stop(esw, extack);
	else
		err = -EINVAL;

unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}
2307
/* devlink op: report the current eswitch mode under mode_lock. */
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	err = esw_mode_to_devlink(esw->mode, mode);
unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}
127ea380 2327
/* devlink op: set the minimum inline mode on every host-function vport.
 * Only allowed when the HW requires per-vport-context inline config,
 * and only while no offloaded flows exist. On partial failure, already
 * updated vports are reverted to the previous inline mode.
 */
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err, vport, num_vport;
	struct mlx5_eswitch *esw;
	u8 mlx5_mode;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto out;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		/* NONE is trivially satisfied when inline is not required */
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			goto out;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		err = -EOPNOTSUPP;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		err = -EOPNOTSUPP;
		goto out;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	mutex_unlock(&esw->mode_lock);
	return 0;

revert_inline_mode:
	/* revert only the vports updated before the failing one */
	num_vport = --vport;
	mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	mutex_unlock(&esw->mode_lock);
	return err;
}
2392
/* devlink op: report the current minimum inline mode under mode_lock. */
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}
2412
/* devlink op: set the encapsulation offload mode (NONE/BASIC only).
 * Requires FDB reformat+decap caps for anything but NONE. In legacy
 * mode just record the value; in offloads mode the FDB tables must be
 * rebuilt (only possible with no flows configured). If the rebuild
 * with the new mode fails, the previous mode is restored and the FDB
 * re-created best-effort.
 */
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		/* nothing to rebuild yet; applied when offloads is enabled */
		esw->offloads.encap = encap;
		goto unlock;
	}

	if (esw->offloads.encap == encap)
		goto unlock;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw);
	}

unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}
2474
/* devlink op: report the current encapsulation mode under mode_lock. */
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	*encap = esw->offloads.encap;
unlock:
	mutex_unlock(&esw->mode_lock);
	return 0;
}
2496
c2d7712c
BW
/* Whether @vport_num has a representor on this device: the host PF rep
 * exists only on ECPF-managed devices, and the ECPF rep only when the
 * ECPF vport exists.
 */
static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
	/* Currently, only ECPF based device has representor for host PF. */
	if (vport_num == MLX5_VPORT_PF &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	if (vport_num == MLX5_VPORT_ECPF &&
	    !mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}
2511
/* Register representor ops of a given type (eth/ib/...) and mark every
 * vport that can have a rep as REGISTERED. Actual rep loading happens
 * later, when the vport is enabled in offloads mode.
 */
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	int i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_all_reps(esw, i, rep) {
		if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
			rep_data = &rep->rep_data[rep_type];
			atomic_set(&rep_data->state, REP_REGISTERED);
		}
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
127ea380 2529
/* Unregister all representors of a given type: unload them first if
 * offloads mode is active, then mark every rep UNREGISTERED.
 */
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_all_reps(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
726293f1 2542
/* Return the private data the rep owner stored for the uplink rep of
 * the given rep type.
 */
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}
22215908
MB
2550
/* Return the protocol device (e.g. netdev) behind a vport's rep of the
 * given type, or NULL if the rep is not loaded or the ops provide no
 * get_proto_dev callback.
 */
void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
22215908
MB
2565
2566void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
2567{
879c8f84 2568 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
22215908 2569}
57cbd893
MB
2570EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
2571
/* Exported accessor for a vport's representor struct. */
struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
91d6291c
PP
2578
/* True when @vport_num falls in the VF vport range
 * [MLX5_VPORT_FIRST_VF, max_vfs].
 */
bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num >= MLX5_VPORT_FIRST_VF &&
	       vport_num <= esw->dev->priv.sriov.max_vfs;
}
7445cfb1 2584
5b7cb745
PB
/* Whether reg_c1 loopback is enabled on this eswitch (flag query). */
bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
}
EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);
2590
7445cfb1
JL
/* Whether source-vport match metadata mode is active (flag query). */
bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
2596
/* Return the vport's metadata value shifted into the position used for
 * source-port matching (upper ESW_SOURCE_PORT_METADATA_BITS of reg_c_0).
 * Returns 0 (and warns) for an invalid vport number.
 */
u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);