/* drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c */
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

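/* Offloaded flow rules: the two helpers below add and remove FDB rules on
 * behalf of a representor. A rule matches on the source vport taken from
 * attr->in_rep and, depending on attr->action, forwards to attr->out_rep
 * and/or attaches a flow counter. Per-flow vlan push/pop is emulated via
 * per-vport configuration (see the vlan action helpers further down), so
 * those action bits are masked out before programming the firmware.
 */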
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i = 0;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	/* per flow vlan pop/push is emulated, don't set that into the firmware */
	flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport_num = attr->out_rep->vport;
		i++;
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter)) {
			rule = ERR_CAST(counter);
			goto err_counter_alloc;
		}
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = counter;
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
				      MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		flow_act.encap_id = attr->encap_id;

	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
				   spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	mlx5_fc_destroy(esw->dev, counter);
err_counter_alloc:
	return rule;
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);
	mlx5_del_flow_rules(rule);
	mlx5_fc_destroy(esw->dev, counter);
	esw->offloads.num_flows--;
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules without it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules without vlans (attr->vlan = 0) together with rules
	 *     that do push vlans (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

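/* Per-flow vlan push/pop offload is emulated with per-vport vlan settings:
 * push is configured on the VF vport of the rule's source rep, and pop is
 * applied as a global SET_VLAN_STRIP policy on first use. The refcounts
 * below track how many offloaded rules rely on each setting so that it can
 * be torn down again on last use.
 */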
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan;
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

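/* "Send to vport" rules let a representor inject packets directly to its
 * VF: the rule matches on the representor's SQ number (source_sqn) coming
 * from the eswitch manager vport (0) and forwards to the VF vport.
 */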
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

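/* The FDB miss rule catches packets that match no offloaded rule and sends
 * them to the eswitch manager vport (0), where they are delivered to the
 * representor netdevices for slow-path handling.
 */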
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule = flow_rule;
out:
	kvfree(spec);
	return err;
}

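/* The fast-path FDB table holds the offloaded flow rules and is auto-grouped;
 * its size is bounded by both the device's maximum flow-table size and the
 * number of available flow counters per group.
 */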
#define ESW_OFFLOADS_NUM_GROUPS  4

static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int esw_size, err = 0;
	u32 flags = 0;
	u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto out;
	}

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS);

	esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.fdb = fdb;

out:
	return err;
}

static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
}

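/* The slow-path FDB table complements the fast-path one: it carries one
 * send-to-vport rule per representor SQ plus the single miss rule, hence
 * the two flow groups created below.
 */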
#define MAX_PF_SQ 256

static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	err = esw_create_offloads_fast_fdb_table(esw);
	if (err)
		goto fast_fdb_err;

	table_size = nvports + MAX_PF_SQ + 1;

	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
	esw_destroy_offloads_fast_fdb_table(esw);
}

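/* The offloads table is created in MLX5_FLOW_NAMESPACE_OFFLOADS and sized
 * for all VFs plus two extra entries; the vport rx rules installed in it
 * demux slow-path traffic to the right representor TIR.
 */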
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in); /* allocated with kvzalloc(), so free with kvfree() */
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

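/* Each representor installs one rx rule here: match on its vport number in
 * the source_port metadata and forward to the representor's TIR.
 */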
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

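/* Switching between legacy and offloads mode is driven through devlink. It
 * is implemented by disabling SRIOV in the current mode and re-enabling it
 * in the requested one; if that fails, the previous mode is restored.
 */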
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			esw_warn(esw->dev, "Inline mode is different between vports\n");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_esw_offload *offloads;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN];
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vfs,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	offloads = &esw->offloads;
	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	for (vport = 0; vport < total_vfs; vport++) {
		rep = &offloads->vport_reps[vport];

		rep->vport = vport;
		ether_addr_copy(rep->hw_id, hw_id);
	}

	offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;

	return 0;
}

static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = nvports - 1; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;

		rep->unload(rep);
	}
}

static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;

		err = rep->load(esw->dev, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	esw_offloads_unload_reps(esw, vport);
	return err;
}

int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	int err;

	/* disable PF RoCE so missed packets don't go through RoCE steering */
	mlx5_dev_list_lock();
	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		goto create_fdb_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	err = esw_offloads_load_reps(esw, nvports);
	if (err)
		goto err_reps;

	return 0;

err_reps:
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

create_fdb_err:
	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
	}

	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	esw_offloads_unload_reps(esw, nvports);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return 0;
}

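/* devlink entry points for the eswitch mode knob. As an illustration only
 * (the exact syntax depends on the iproute2/devlink version, and the PCI
 * address below is a placeholder):
 *
 *   devlink dev eswitch set pci/0000:03:00.0 mode switchdev
 *   devlink dev eswitch show pci/0000:03:00.0
 */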
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

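/* devlink entry points for the wqe inline-mode knob (none/link/network/
 * transport). Setting it is only allowed when the device takes the inline
 * mode from the vport context and no offloaded flows are installed; if a
 * vport fails mid-way, the already-modified vports are reverted.
 */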
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		esw_warn(dev, "Inline mode can't be set\n");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set inline mode when flows are configured\n");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			esw_warn(dev, "Failed to set min inline on vport %d\n",
				 vport);
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

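/* devlink entry points for the encap knob (none/basic). Changing it requires
 * recreating the fast-path FDB table, since tunnel encap/decap support is a
 * table property (MLX5_FLOW_TABLE_TUNNEL_EN), so the change is refused while
 * offloaded flows exist. An illustrative invocation (the exact syntax varies
 * between devlink versions): devlink dev eswitch set pci/0000:03:00.0 encap enable
 */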
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set encapsulation when flows are configured\n");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fast_fdb_table(esw);

	esw->offloads.encap = encap;
	err = esw_create_offloads_fast_fdb_table(esw);
	if (err) {
		esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fast_fdb_table(esw);
	}
	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

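/* Representor registration: a rep driver (e.g. the mlx5e ethernet
 * representor) provides per-vport load/unload callbacks and private data.
 * Reps marked valid are loaded when offloads mode starts and unloaded on
 * cleanup or explicit unregistration.
 */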
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep *__rep)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	rep->load = __rep->load;
	rep->unload = __rep->unload;
	rep->priv = __rep->priv;

	rep->valid = true;
}

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->unload(rep);

	rep->valid = false;
}

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->priv;
}