/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

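/* Add an offloaded rule to the fast path FDB table. The rule matches on
 * the source vport of attr->in_rep in addition to the caller-supplied
 * headers, and forwards to the attr->out_rep vport and/or a flow counter,
 * optionally applying decap, encap and header-modify actions.
 */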
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i = 0;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	/* Per-flow vlan pop/push is emulated; don't program it into the firmware */
	flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport_num = attr->out_rep->vport;
		i++;
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter)) {
			rule = ERR_CAST(counter);
			goto err_counter_alloc;
		}
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = counter;
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
				      MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		flow_act.encap_id = attr->encap_id;

	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
				   spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	mlx5_fc_destroy(esw->dev, counter);
err_counter_alloc:
	return rule;
}

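/* Remove an offloaded rule and free the flow counter attached to it. */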
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);
	mlx5_del_flow_rules(rule);
	mlx5_fc_destroy(esw->dev, counter);
	esw->offloads.num_flows--;
}

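/* Apply (or clear, for val == 0) the global vlan pop policy on all
 * enabled VF vports.
 */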
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

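/* A vlan push is charged to the rule's source vport and a vlan pop to its
 * destination vport; with neither action, the source vport is used.
 */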
static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

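/* Check whether the requested vlan push/pop combination can be offloaded
 * for this pair of representors.
 */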
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules without it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules without vlans (attr->vlan = 0) together with
	 *     rules that do push vlans (attr->vlan != 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

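/* Set up the vport vlan push/pop configuration a rule relies on, taking
 * the matching reference counts.
 */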
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan;
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

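/* Undo mlx5_eswitch_add_vlan_action(): drop the reference counts and
 * clear the vport vlan configuration once the last rule is removed.
 */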
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

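/* Add a slow path rule matching on a given send queue (SQ) number, so
 * that packets sent on that SQ from vport 0 are forwarded to the given
 * vport.
 */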
static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}

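/* Remove all send-to-vport rules installed for this representor. */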
void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5_esw_sq *esw_sq, *tmp;

	if (esw->mode != SRIOV_OFFLOADS)
		return;

	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
		list_del(&esw_sq->list);
		kfree(esw_sq);
	}
}

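/* Install one send-to-vport rule per send queue of the representor. */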
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u16 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_esw_sq *esw_sq;
	int err;
	int i;

	if (esw->mode != SRIOV_OFFLOADS)
		return 0;

	for (i = 0; i < sqns_num; i++) {
		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
		if (!esw_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(esw_sq);
			goto out_err;
		}
		esw_sq->send_to_vport_rule = flow_rule;
		list_add(&esw_sq->list, &rep->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5_eswitch_sqs2vport_stop(esw, rep);
	return err;
}

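/* Add the slow path miss rule: an empty match that forwards anything not
 * otherwise steered to vport 0, i.e. the PF.
 */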
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		err = -ENOMEM;
		goto out;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule = flow_rule;
out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

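/* The fast path FDB table holds the offloaded flow rules. It is
 * auto-grouped, and sized as the minimum of what the flow counters and
 * the FDB flow table size capabilities allow.
 */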
static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int esw_size, err = 0;
	u32 flags = 0;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto out;
	}

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS);

	esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS,
			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.fdb = fdb;

out:
	return err;
}

static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
}

#define MAX_PF_SQ 256

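/* Create the offloads FDB tables: the fast path table for offloaded
 * rules, and the slow path table holding the send-to-vport group, the
 * miss group and the miss rule.
 */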
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	err = esw_create_offloads_fast_fdb_table(esw);
	if (err)
		goto fast_fdb_err;

	table_size = nvports + MAX_PF_SQ + 1;

	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
	esw_destroy_offloads_fast_fdb_table(esw);
}

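/* The offloads table holds the vport rx rules, one entry per vport
 * (num_vfs + 2).
 */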
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

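/* A single flow group matching on source vport, spanning the whole
 * offloads table.
 */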
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in); /* allocated with mlx5_vzalloc(), must use kvfree() */
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

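/* Add a rule steering traffic coming from the given vport to the given
 * TIR.
 */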
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

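/* Move the eswitch from legacy SRIOV mode to offloads mode, trying to
 * fall back to legacy mode on failure.
 */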
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			esw_warn(esw->dev, "Inline mode is different between vports\n");
		}
	}
	return err;
}

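/* Create the offloads steering objects and load every registered (valid)
 * representor, unwinding everything on failure.
 */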
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	/* disable PF RoCE so missed packets don't go through RoCE steering */
	mlx5_dev_list_lock();
	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		goto create_fdb_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;

		err = rep->load(esw, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	for (vport--; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

create_fdb_err:
	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}

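/* Move the eswitch back from offloads mode to legacy SRIOV mode. */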
static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
	}

	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}

	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

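/* Validation common to all the devlink eswitch callbacks: an Ethernet
 * port on a vport group manager device, with SRIOV enabled.
 */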
static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		esw_warn(dev, "Inline mode can't be set\n");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set inline mode when flows are configured\n");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			esw_warn(dev, "Failed to set min inline on vport %d\n",
				 vport);
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

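/* Changing the encap mode requires re-creating the fast path FDB table
 * with different flags, which is only done while no flows are offloaded.
 */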
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set encapsulation when flows are configured\n");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fast_fdb_table(esw);

	esw->offloads.encap = encap;
	err = esw_create_offloads_fast_fdb_table(esw);
	if (err) {
		esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
		esw->offloads.encap = !encap;
		(void) esw_create_offloads_fast_fdb_table(esw);
	}
	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

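/* Copy in a representor for the given vport index and mark it valid. */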
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep *__rep)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	memset(rep, 0, sizeof(*rep));

	rep->load = __rep->load;
	rep->unload = __rep->unload;
	rep->vport = __rep->vport;
	rep->netdev = __rep->netdev;
	ether_addr_copy(rep->hw_id, __rep->hw_id);

	INIT_LIST_HEAD(&rep->vport_sqs_list);
	rep->valid = true;
}

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->unload(esw, rep);

	rep->valid = false;
}

1157struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
1158{
1159#define UPLINK_REP_INDEX 0
1160 struct mlx5_esw_offload *offloads = &esw->offloads;
1161 struct mlx5_eswitch_rep *rep;
1162
1163 rep = &offloads->vport_reps[UPLINK_REP_INDEX];
1164 return rep->netdev;
1165}