/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

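/* Build and install an offloaded FDB flow rule on behalf of a representor.
 * The source vport is matched via the misc parameters, optional vport
 * forwarding and flow counter destinations are attached, and VLAN push/pop
 * (which is emulated per vport) is masked out of the actions handed to the
 * flow steering core.
 */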
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i = 0;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	/* per flow vlan pop/push is emulated, don't set that into the firmware */
	flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport_num = attr->out_rep->vport;
		i++;
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter)) {
			rule = ERR_CAST(counter);
			goto err_counter_alloc;
		}
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = counter;
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
				      MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		flow_act.encap_id = attr->encap_id;

	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
				   spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	mlx5_fc_destroy(esw->dev, counter);
err_counter_alloc:
	return rule;
}

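/* Tear down a rule installed by mlx5_eswitch_add_offloaded_rule(): delete the
 * steering rule, free its flow counter (if any) and drop the offloaded flow
 * accounting.
 */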
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);
	mlx5_del_flow_rules(rule);
	mlx5_fc_destroy(esw->dev, counter);
	esw->offloads.num_flows--;
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

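/* Apply the VLAN part of an offloaded rule. Since per-flow VLAN push/pop is
 * emulated with per-vport VLAN settings, this tracks refcounts on the
 * affected representor and on the global pop policy, and programs the vport
 * VLAN the first time a push rule shows up.
 */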
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan;
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

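/* Add an FDB rule that matches packets sent by the eswitch manager (source
 * vport 0) from the given SQ and forwards them to the requested vport.
 */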
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}

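/* Add the FDB miss rule: a match-all entry in the slow path table that
 * forwards packets which hit no other rule to vport 0 (the eswitch manager).
 */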
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule = flow_rule;
out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

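/* Create the fast path FDB table: an auto-grouped table sized to the smaller
 * of the device's max flow table size and max_flow_counter * group count,
 * with tunnel encap/decap enabled when the devlink encap mode requests it.
 */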
static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int esw_size, err = 0;
	u32 flags = 0;
	u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto out;
	}

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS);

	esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.fdb = fdb;

out:
	return err;
}

static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
}

#define MAX_PF_SQ 256

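/* Create the offloads FDB tables: the auto-grouped fast path table plus a
 * slow path table containing the send-to-vport flow group (sized for the VF
 * and PF SQs) and the miss group, then install the miss rule.
 */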
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	err = esw_create_offloads_fast_fdb_table(esw);
	if (err)
		goto fast_fdb_err;

	table_size = nvports + MAX_PF_SQ + 1;

	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
	esw_destroy_offloads_fast_fdb_table(esw);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

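/* Add a rule to the offloads table that matches on the packet's source vport
 * and forwards it to the given TIR, e.g. so a representor can receive the
 * traffic of the vport it represents.
 */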
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

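/* Switch a running legacy SRIOV eswitch into offloads (switchdev) mode by
 * disabling and re-enabling SRIOV; falls back to legacy mode if that fails,
 * and resolves a common inline mode across the vports if none was chosen.
 */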
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			esw_warn(esw->dev, "Inline mode is different between vports\n");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_esw_offload *offloads;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN];
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vfs,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	offloads = &esw->offloads;
	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	for (vport = 0; vport < total_vfs; vport++) {
		rep = &offloads->vport_reps[vport];

		rep->vport = vport;
		ether_addr_copy(rep->hw_id, hw_id);
	}

	offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;

	return 0;
}

static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = nvports - 1; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;

		rep->unload(rep);
	}
}

static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;

		err = rep->load(esw->dev, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	esw_offloads_unload_reps(esw, vport);
	return err;
}

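/* Bring up offloads mode: temporarily unbind the IB (RoCE) interface so
 * missed packets are not claimed by RoCE steering, create the FDB and
 * offloads tables plus the vport rx group, and load all valid representors.
 */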
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	int err;

	/* disable PF RoCE so missed packets don't go through RoCE steering */
	mlx5_dev_list_lock();
	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		goto create_fdb_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	err = esw_offloads_load_reps(esw, nvports);
	if (err)
		goto err_reps;

	return 0;

err_reps:
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

create_fdb_err:
	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
	}

	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	esw_offloads_unload_reps(esw, nvports);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

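/* Common validation for the devlink eswitch callbacks: the device must be an
 * Ethernet port, a vport group manager, and SRIOV must be enabled.
 */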
static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

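/* devlink handlers for the eswitch inline mode. Setting a new mode is only
 * allowed when min-inline is per vport context and no flows are offloaded;
 * it is applied to every enabled VF vport and rolled back on failure.
 */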
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		esw_warn(dev, "Inline mode can't be set\n");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set inline mode when flows are configured\n");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			esw_warn(dev, "Failed to set min inline on vport %d\n",
				 vport);
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

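/* devlink handlers for the eswitch encapsulation mode. Changing the mode in
 * offloads mode requires re-creating the fast path FDB table (with or without
 * tunnel support), which is only allowed while no flows are offloaded.
 */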
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set encapsulation when flows are configured\n");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fast_fdb_table(esw);

	esw->offloads.encap = encap;
	err = esw_create_offloads_fast_fdb_table(esw);
	if (err) {
		esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fast_fdb_table(esw);
	}
	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

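/* Register/unregister a representor for a given vport index: the caller
 * supplies load/unload callbacks and the backing netdevice; unregistering an
 * enabled vport in offloads mode unloads the representor first.
 */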
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep *__rep)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	rep->load = __rep->load;
	rep->unload = __rep->unload;
	rep->netdev = __rep->netdev;

	INIT_LIST_HEAD(&rep->vport_sqs_list);
	rep->valid = true;
}

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->unload(rep);

	rep->valid = false;
}

struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->netdev;
}