/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
        FDB_FAST_PATH = 0,
        FDB_SLOW_PATH
};

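/*
 * Install an offloaded fast path FDB rule. The caller's match spec is
 * extended to also match on the source vport of the ingress representor,
 * and the rule forwards to the egress representor's vport and/or to a
 * flow counter, according to attr->action.
 */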
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_spec *spec,
                                struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_flow_destination dest[2] = {};
        struct mlx5_fc *counter = NULL;
        struct mlx5_flow_handle *rule;
        void *misc;
        int action;
        int i = 0;

        if (esw->mode != SRIOV_OFFLOADS)
                return ERR_PTR(-EOPNOTSUPP);

        action = attr->action;

        if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
                dest[i].vport_num = attr->out_rep->vport;
                i++;
        }
        if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(esw->dev, true);
                if (IS_ERR(counter))
                        return ERR_CAST(counter);
                dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest[i].counter = counter;
                i++;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
                                      MLX5_MATCH_MISC_PARAMETERS;

        rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
                                   spec, action, 0, dest, i);
        if (IS_ERR(rule))
                mlx5_fc_destroy(esw->dev, counter);

        return rule;
}

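/*
 * Apply (val == SET_VLAN_STRIP) or remove (val == 0) the vlan pop policy
 * on every enabled VF vport that has a registered representor.
 */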
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
        struct mlx5_eswitch_rep *rep;
        int vf_vport, err = 0;

        esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
        for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
                rep = &esw->offloads.vport_reps[vf_vport];
                if (!rep->valid)
                        continue;

                err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
                if (err)
                        goto out;
        }

out:
        return err;
}

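/*
 * Return the vport whose vlan configuration the rule touches: vlan push
 * is programmed on the ingress (in_rep) vport, vlan pop on the egress
 * (out_rep) vport; with neither action, default to the ingress vport.
 */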
static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
        struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

        in_rep = attr->in_rep;
        out_rep = attr->out_rep;

        if (push)
                vport = in_rep;
        else if (pop)
                vport = out_rep;
        else
                vport = in_rep;

        return vport;
}

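/*
 * Reject vlan push/pop combinations the offload scheme can't honor:
 * push/pop only make sense together with a forward action, neither may
 * involve the uplink vport, and a vport can only have a single vlan id
 * pushed at any given time.
 */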
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
                                     bool push, bool pop, bool fwd)
{
        struct mlx5_eswitch_rep *in_rep, *out_rep;

        if ((push || pop) && !fwd)
                goto out_notsupp;

        in_rep = attr->in_rep;
        out_rep = attr->out_rep;

        if (push && in_rep->vport == FDB_UPLINK_VPORT)
                goto out_notsupp;

        if (pop && out_rep->vport == FDB_UPLINK_VPORT)
                goto out_notsupp;

        /* vport has vlan push configured, can't offload VF --> wire rules w/o it */
        if (!push && !pop && fwd)
                if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
                        goto out_notsupp;

        /* protects against (1) setting rules with different vlans to push and
         * (2) setting rules w/o vlans (attr->vlan = 0) && with vlans to push (!= 0)
         */
        if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
                goto out_notsupp;

        return 0;

out_notsupp:
        return -ENOTSUPP;
}

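/*
 * Account for the vlan side effects of a new offloaded rule. The first
 * push/pop rule in the eswitch turns on the global vlan pop policy; a
 * push rule additionally programs attr->vlan on the ingress vport. Both
 * settings are refcounted so they are applied exactly once.
 */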
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr)
{
        struct offloads_fdb *offloads = &esw->fdb_table.offloads;
        struct mlx5_eswitch_rep *vport = NULL;
        bool push, pop, fwd;
        int err = 0;

        push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
        pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
        fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

        err = esw_add_vlan_action_check(attr, push, pop, fwd);
        if (err)
                return err;

        attr->vlan_handled = false;

        vport = esw_vlan_action_get_vport(attr, push, pop);

        if (!push && !pop && fwd) {
                /* tracks VF --> wire rules without vlan push action */
                if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
                        vport->vlan_refcount++;
                        attr->vlan_handled = true;
                }

                return 0;
        }

        if (!push && !pop)
                return 0;

        if (!(offloads->vlan_push_pop_refcount)) {
                /* it's the 1st vlan rule, apply global vlan pop policy */
                err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
                if (err)
                        goto out;
        }
        offloads->vlan_push_pop_refcount++;

        if (push) {
                if (vport->vlan_refcount)
                        goto skip_set_push;

                err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
                                                    SET_VLAN_INSERT | SET_VLAN_STRIP);
                if (err)
                        goto out;
                vport->vlan = attr->vlan;
skip_set_push:
                vport->vlan_refcount++;
        }
out:
        if (!err)
                attr->vlan_handled = true;
        return err;
}

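/*
 * Undo the vlan accounting done in mlx5_eswitch_add_vlan_action(): drop
 * the per-vport and global refcounts, remove the vport vlan push setting
 * when its last user goes away, and turn the global vlan pop policy back
 * off once no push/pop rule remains.
 */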
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr)
{
        struct offloads_fdb *offloads = &esw->fdb_table.offloads;
        struct mlx5_eswitch_rep *vport = NULL;
        bool push, pop, fwd;
        int err = 0;

        if (!attr->vlan_handled)
                return 0;

        push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
        pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
        fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

        vport = esw_vlan_action_get_vport(attr, push, pop);

        if (!push && !pop && fwd) {
                /* tracks VF --> wire rules without vlan push action */
                if (attr->out_rep->vport == FDB_UPLINK_VPORT)
                        vport->vlan_refcount--;

                return 0;
        }

        if (push) {
                vport->vlan_refcount--;
                if (vport->vlan_refcount)
                        goto skip_unset_push;

                vport->vlan = 0;
                err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
                                                    0, 0, SET_VLAN_STRIP);
                if (err)
                        goto out;
        }

skip_unset_push:
        offloads->vlan_push_pop_refcount--;
        if (offloads->vlan_push_pop_refcount)
                return 0;

        /* no more vlan rules, stop global vlan pop policy */
        err = esw_set_global_vlan_pop(esw, 0);

out:
        return err;
}

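/*
 * Install a send-to-vport rule in the slow path FDB: traffic sent by the
 * eswitch manager (source vport 0) from the given SQ is steered to the
 * peer vport, which is how a representor's transmitted packets reach the
 * VF it represents.
 */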
static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
        struct mlx5_flow_destination dest;
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;
        void *misc;

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec) {
                esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
                flow_rule = ERR_PTR(-ENOMEM);
                goto out;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
        MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport_num = vport;

        flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
                                        MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                        0, &dest, 1);
        if (IS_ERR(flow_rule))
                esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
        kvfree(spec);
        return flow_rule;
}

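/*
 * Remove all send-to-vport rules of a representor. Also serves as the
 * error unwind path of mlx5_eswitch_sqs2vport_start() below.
 */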
void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep)
{
        struct mlx5_esw_sq *esw_sq, *tmp;

        if (esw->mode != SRIOV_OFFLOADS)
                return;

        list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
                mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
                list_del(&esw_sq->list);
                kfree(esw_sq);
        }
}

int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep,
                                 u16 *sqns_array, int sqns_num)
{
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_esw_sq *esw_sq;
        int err;
        int i;

        if (esw->mode != SRIOV_OFFLOADS)
                return 0;

        for (i = 0; i < sqns_num; i++) {
                esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
                if (!esw_sq) {
                        err = -ENOMEM;
                        goto out_err;
                }

                /* Add re-inject rule to the PF/representor sqs */
                flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
                                                                rep->vport,
                                                                sqns_array[i]);
                if (IS_ERR(flow_rule)) {
                        err = PTR_ERR(flow_rule);
                        kfree(esw_sq);
                        goto out_err;
                }
                esw_sq->send_to_vport_rule = flow_rule;
                list_add(&esw_sq->list, &rep->vport_sqs_list);
        }
        return 0;

out_err:
        mlx5_eswitch_sqs2vport_stop(esw, rep);
        return err;
}

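/*
 * Add the FDB miss rule: an all-miss match (match_criteria_enable stays
 * zero) that forwards anything not handled by the fast or slow path
 * groups to vport 0, presumably so the eswitch manager can deliver it to
 * the right representor.
 */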
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
        struct mlx5_flow_destination dest;
        struct mlx5_flow_handle *flow_rule = NULL;
        struct mlx5_flow_spec *spec;
        int err = 0;

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec) {
                esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
                err = -ENOMEM;
                goto out;
        }

        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport_num = 0;

        flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
                                        MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                        0, &dest, 1);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
                goto out;
        }

        esw->fdb_table.offloads.miss_rule = flow_rule;
out:
        kvfree(spec);
        return err;
}

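/*
 * The offloads FDB is built out of two tables: a large, auto-grouped
 * fast path table (FDB_FAST_PATH) holding the offloaded rules, and a
 * small slow path table (FDB_SLOW_PATH) with a send-to-vport group sized
 * for every representor SQ plus a catch-all miss group at the end.
 */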
#define MAX_PF_SQ 256
#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */
#define ESW_OFFLOADS_NUM_GROUPS 4

static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_table *fdb = NULL;
        struct mlx5_flow_group *g;
        u32 *flow_group_in;
        void *match_criteria;
        int table_size, ix, err = 0;

        flow_group_in = mlx5_vzalloc(inlen);
        if (!flow_group_in)
                return -ENOMEM;

        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!root_ns) {
                esw_warn(dev, "Failed to get FDB flow namespace\n");
                err = -EOPNOTSUPP;
                goto ns_err;
        }

        esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
                  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

        fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
                                                  ESW_OFFLOADS_NUM_ENTRIES,
                                                  ESW_OFFLOADS_NUM_GROUPS, 0,
                                                  0);
        if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
                goto fast_fdb_err;
        }
        esw->fdb_table.fdb = fdb;

        table_size = nvports + MAX_PF_SQ + 1;
        fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0, 0);
        if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
                goto slow_fdb_err;
        }
        esw->fdb_table.offloads.fdb = fdb;

        /* create send-to-vport group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_MISC_PARAMETERS);

        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

        MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

        ix = nvports + MAX_PF_SQ;
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
                goto send_vport_err;
        }
        esw->fdb_table.offloads.send_to_vport_grp = g;

        /* create miss group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);

        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
                goto miss_err;
        }
        esw->fdb_table.offloads.miss_grp = g;

        err = esw_add_fdb_miss_rule(esw);
        if (err)
                goto miss_rule_err;

        return 0;

miss_rule_err:
        mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
        mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
        mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
        mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
        kvfree(flow_group_in);
        return err;
}

static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
{
        if (!esw->fdb_table.fdb)
                return;

        esw_debug(esw->dev, "Destroy offloads FDB Table\n");
        mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

        mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
        mlx5_destroy_flow_table(esw->fdb_table.fdb);
}

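/*
 * The offloads table lives in the NIC RX namespace and is sized to one
 * rule per vport (num_vfs + 2, presumably covering the PF and uplink as
 * well); the vport rx rules installed below steer matching traffic to a
 * representor's TIR.
 */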
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
        struct mlx5_flow_namespace *ns;
        struct mlx5_flow_table *ft_offloads;
        struct mlx5_core_dev *dev = esw->dev;
        int err = 0;

        ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
        if (!ns) {
                esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
                return -EOPNOTSUPP;
        }

        ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
        if (IS_ERR(ft_offloads)) {
                err = PTR_ERR(ft_offloads);
                esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
                return err;
        }

        esw->offloads.ft_offloads = ft_offloads;
        return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;

        mlx5_destroy_flow_table(offloads->ft_offloads);
}

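/*
 * A single flow group covers the whole offloads table: every rule in it
 * matches only on the misc source_port field (the peer vport number).
 */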
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        struct mlx5_priv *priv = &esw->dev->priv;
        u32 *flow_group_in;
        void *match_criteria, *misc;
        int err = 0;
        int nvports = priv->sriov.num_vfs + 2;

        flow_group_in = mlx5_vzalloc(inlen);
        if (!flow_group_in)
                return -ENOMEM;

        /* create vport rx group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_MISC_PARAMETERS);

        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
        misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

        g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
                goto out;
        }

        esw->offloads.vport_rx_group = g;
out:
        kvfree(flow_group_in);
        return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
        mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

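/*
 * Add the rx rule of a representor: packets whose source vport metadata
 * equals @vport are forwarded to the given TIR, making traffic from the
 * represented VF appear on the representor netdev.
 */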
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
        struct mlx5_flow_destination dest;
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;
        void *misc;

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec) {
                esw_warn(esw->dev, "Failed to alloc match parameters\n");
                flow_rule = ERR_PTR(-ENOMEM);
                goto out;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_port, vport);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        dest.tir_num = tirn;

        flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
                                        MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                        0, &dest, 1);
        if (IS_ERR(flow_rule)) {
                esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
                goto out;
        }

out:
        kvfree(spec);
        return flow_rule;
}

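/*
 * Mode changes are implemented as a full SRIOV disable/re-enable cycle
 * with the same number of VFs; if enabling the new mode fails, we try to
 * fall back to the previous one.
 */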
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
        int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

        if (esw->mode != SRIOV_LEGACY) {
                esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
                return -EINVAL;
        }

        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
        if (err) {
                esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
                err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
                if (err1)
                        esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
        }
        return err;
}

int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
        struct mlx5_eswitch_rep *rep;
        int vport;
        int err;

        err = esw_create_offloads_fdb_table(esw, nvports);
        if (err)
                return err;

        err = esw_create_offloads_table(esw);
        if (err)
                goto create_ft_err;

        err = esw_create_vport_rx_group(esw);
        if (err)
                goto create_fg_err;

        for (vport = 0; vport < nvports; vport++) {
                rep = &esw->offloads.vport_reps[vport];
                if (!rep->valid)
                        continue;

                err = rep->load(esw, rep);
                if (err)
                        goto err_reps;
        }
        return 0;

err_reps:
        for (vport--; vport >= 0; vport--) {
                rep = &esw->offloads.vport_reps[vport];
                if (!rep->valid)
                        continue;
                rep->unload(esw, rep);
        }
        esw_destroy_vport_rx_group(esw);

create_fg_err:
        esw_destroy_offloads_table(esw);

create_ft_err:
        esw_destroy_offloads_fdb_table(esw);
        return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
        int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
        if (err) {
                esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
                err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
                if (err1)
                        esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
        }

        return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
        struct mlx5_eswitch_rep *rep;
        int vport;

        for (vport = 0; vport < nvports; vport++) {
                rep = &esw->offloads.vport_reps[vport];
                if (!rep->valid)
                        continue;
                rep->unload(esw, rep);
        }

        esw_destroy_vport_rx_group(esw);
        esw_destroy_offloads_table(esw);
        esw_destroy_offloads_fdb_table(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
        switch (mode) {
        case DEVLINK_ESWITCH_MODE_LEGACY:
                *mlx5_mode = SRIOV_LEGACY;
                break;
        case DEVLINK_ESWITCH_MODE_SWITCHDEV:
                *mlx5_mode = SRIOV_OFFLOADS;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
        switch (mlx5_mode) {
        case SRIOV_LEGACY:
                *mode = DEVLINK_ESWITCH_MODE_LEGACY;
                break;
        case SRIOV_OFFLOADS:
                *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

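/*
 * devlink entry points for "devlink dev eswitch set/show". The devlink
 * modes map 1:1 onto the internal SRIOV_LEGACY/SRIOV_OFFLOADS states;
 * both calls are rejected while SRIOV is disabled (SRIOV_NONE) or when
 * the device is not the vport group manager.
 */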
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
        struct mlx5_core_dev *dev;
        u16 cur_mlx5_mode, mlx5_mode = 0;

        dev = devlink_priv(devlink);

        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;

        cur_mlx5_mode = dev->priv.eswitch->mode;

        if (cur_mlx5_mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        if (esw_mode_from_devlink(mode, &mlx5_mode))
                return -EINVAL;

        if (cur_mlx5_mode == mlx5_mode)
                return 0;

        if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
                return esw_offloads_start(dev->priv.eswitch);
        else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
                return esw_offloads_stop(dev->priv.eswitch);
        else
                return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
        struct mlx5_core_dev *dev;

        dev = devlink_priv(devlink);

        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;

        if (dev->priv.eswitch->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

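/*
 * Representor registration: the rep owner (the mlx5e netdev code, for
 * instance) supplies load/unload callbacks and per-rep data, which are
 * copied into the eswitch's vport_reps array. Unregistering unloads the
 * rep first if the eswitch is in offloads mode and the vport is enabled.
 */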
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
                                     int vport_index,
                                     struct mlx5_eswitch_rep *__rep)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;

        rep = &offloads->vport_reps[vport_index];

        memset(rep, 0, sizeof(*rep));

        rep->load = __rep->load;
        rep->unload = __rep->unload;
        rep->vport = __rep->vport;
        rep->priv_data = __rep->priv_data;
        ether_addr_copy(rep->hw_id, __rep->hw_id);

        INIT_LIST_HEAD(&rep->vport_sqs_list);
        rep->valid = true;
}

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
                                       int vport_index)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;

        rep = &offloads->vport_reps[vport_index];

        if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
                rep->unload(esw, rep);

        rep->valid = false;
}