/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
        FDB_FAST_PATH = 0,
        FDB_SLOW_PATH
};

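/* Build and install an offloaded FDB flow rule: match on the source vport
 * (and, for decap rules, also the inner headers), then forward to the
 * destination vport and/or a flow counter. Per-flow VLAN push/pop is
 * emulated elsewhere, so those action bits are masked out before the rule
 * is programmed into the firmware.
 */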
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_spec *spec,
                                struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_flow_destination dest[2] = {};
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_fc *counter = NULL;
        struct mlx5_flow_handle *rule;
        void *misc;
        int i = 0;

        if (esw->mode != SRIOV_OFFLOADS)
                return ERR_PTR(-EOPNOTSUPP);

        /* per flow vlan pop/push is emulated, don't set that into the firmware */
        flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
                dest[i].vport_num = attr->out_rep->vport;
                i++;
        }
        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(esw->dev, true);
                if (IS_ERR(counter))
                        return ERR_CAST(counter);
                dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest[i].counter = counter;
                i++;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
                                      MLX5_MATCH_MISC_PARAMETERS;
        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
                spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

        if (attr->encap)
                flow_act.encap_id = attr->encap->encap_id;

        rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
                                   spec, &flow_act, dest, i);
        if (IS_ERR(rule))
                mlx5_fc_destroy(esw->dev, counter);

        return rule;
}

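/* Apply (or clear) the global VLAN-strip policy on every enabled VF vport
 * that has a valid representor.
 */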
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
        struct mlx5_eswitch_rep *rep;
        int vf_vport, err = 0;

        esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
        for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
                rep = &esw->offloads.vport_reps[vf_vport];
                if (!rep->valid)
                        continue;

                err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
                if (err)
                        goto out;
        }

out:
        return err;
}

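/* Pick which rep's vport owns the VLAN state for this rule: the ingress rep
 * for a push, the egress rep for a pop, and the ingress rep otherwise.
 */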
static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
        struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

        in_rep = attr->in_rep;
        out_rep = attr->out_rep;

        if (push)
                vport = in_rep;
        else if (pop)
                vport = out_rep;
        else
                vport = in_rep;

        return vport;
}

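/* Reject VLAN push/pop combinations the offload path can't honor: push/pop
 * without forwarding, a push on the uplink, a pop towards the uplink, or a
 * push whose VLAN conflicts with the one already configured on the vport.
 */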
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
                                     bool push, bool pop, bool fwd)
{
        struct mlx5_eswitch_rep *in_rep, *out_rep;

        if ((push || pop) && !fwd)
                goto out_notsupp;

        in_rep = attr->in_rep;
        out_rep = attr->out_rep;

        if (push && in_rep->vport == FDB_UPLINK_VPORT)
                goto out_notsupp;

        if (pop && out_rep->vport == FDB_UPLINK_VPORT)
                goto out_notsupp;

        /* vport has vlan push configured, can't offload VF --> wire rules w.o it */
        if (!push && !pop && fwd)
                if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
                        goto out_notsupp;

        /* protects against (1) setting rules with different vlans to push and
         * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
         */
        if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
                goto out_notsupp;

        return 0;

out_notsupp:
        return -EOPNOTSUPP;
}

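/* Account for a new rule's VLAN push/pop actions: take the refcounts, enable
 * the global pop policy for the first push/pop rule, and program the push
 * VLAN on the ingress vport. attr->vlan_handled records whether the delete
 * path needs to undo any of this.
 */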
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr)
{
        struct offloads_fdb *offloads = &esw->fdb_table.offloads;
        struct mlx5_eswitch_rep *vport = NULL;
        bool push, pop, fwd;
        int err = 0;

        push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
        pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
        fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

        err = esw_add_vlan_action_check(attr, push, pop, fwd);
        if (err)
                return err;

        attr->vlan_handled = false;

        vport = esw_vlan_action_get_vport(attr, push, pop);

        if (!push && !pop && fwd) {
                /* tracks VF --> wire rules without vlan push action */
                if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
                        vport->vlan_refcount++;
                        attr->vlan_handled = true;
                }

                return 0;
        }

        if (!push && !pop)
                return 0;

        if (!(offloads->vlan_push_pop_refcount)) {
                /* it's the 1st vlan rule, apply global vlan pop policy */
                err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
                if (err)
                        goto out;
        }
        offloads->vlan_push_pop_refcount++;

        if (push) {
                if (vport->vlan_refcount)
                        goto skip_set_push;

                err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
                                                    SET_VLAN_INSERT | SET_VLAN_STRIP);
                if (err)
                        goto out;
                vport->vlan = attr->vlan;
skip_set_push:
                vport->vlan_refcount++;
        }
out:
        if (!err)
                attr->vlan_handled = true;
        return err;
}

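/* Undo mlx5_eswitch_add_vlan_action(): drop the refcounts, clear the push
 * VLAN when its last user goes away, and disable the global pop policy once
 * no VLAN rules remain.
 */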
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr)
{
        struct offloads_fdb *offloads = &esw->fdb_table.offloads;
        struct mlx5_eswitch_rep *vport = NULL;
        bool push, pop, fwd;
        int err = 0;

        if (!attr->vlan_handled)
                return 0;

        push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
        pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
        fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

        vport = esw_vlan_action_get_vport(attr, push, pop);

        if (!push && !pop && fwd) {
                /* tracks VF --> wire rules without vlan push action */
                if (attr->out_rep->vport == FDB_UPLINK_VPORT)
                        vport->vlan_refcount--;

                return 0;
        }

        if (push) {
                vport->vlan_refcount--;
                if (vport->vlan_refcount)
                        goto skip_unset_push;

                vport->vlan = 0;
                err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
                                                    0, 0, SET_VLAN_STRIP);
                if (err)
                        goto out;
        }

skip_unset_push:
        offloads->vlan_push_pop_refcount--;
        if (offloads->vlan_push_pop_refcount)
                return 0;

        /* no more vlan rules, stop global vlan pop policy */
        err = esw_set_global_vlan_pop(esw, 0);

out:
        return err;
}

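/* Install the slow-path rule that re-injects packets sent on a representor's
 * SQ out through the corresponding vport: match on the SQ number and on
 * source vport 0 (the eswitch manager), then forward to the rep's vport.
 */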
static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest;
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;
        void *misc;

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec) {
                esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
                flow_rule = ERR_PTR(-ENOMEM);
                goto out;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
        MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport_num = vport;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule))
                esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
        kvfree(spec);
        return flow_rule;
}

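/* Tear down all send-to-vport rules installed for a representor's SQs. */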
void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep)
{
        struct mlx5_esw_sq *esw_sq, *tmp;

        if (esw->mode != SRIOV_OFFLOADS)
                return;

        list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
                mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
                list_del(&esw_sq->list);
                kfree(esw_sq);
        }
}

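/* Install one send-to-vport (re-inject) rule per SQ owned by the
 * representor, rolling everything back on the first failure.
 */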
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep,
                                 u16 *sqns_array, int sqns_num)
{
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_esw_sq *esw_sq;
        int err;
        int i;

        if (esw->mode != SRIOV_OFFLOADS)
                return 0;

        for (i = 0; i < sqns_num; i++) {
                esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
                if (!esw_sq) {
                        err = -ENOMEM;
                        goto out_err;
                }

                /* Add re-inject rule to the PF/representor sqs */
                flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
                                                                rep->vport,
                                                                sqns_array[i]);
                if (IS_ERR(flow_rule)) {
                        err = PTR_ERR(flow_rule);
                        kfree(esw_sq);
                        goto out_err;
                }
                esw_sq->send_to_vport_rule = flow_rule;
                list_add(&esw_sq->list, &rep->vport_sqs_list);
        }
        return 0;

out_err:
        mlx5_eswitch_sqs2vport_stop(esw, rep);
        return err;
}

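/* Add the FDB miss rule: an empty match, sitting in the lowest-priority
 * slow-path group, that forwards anything no offloaded rule matched to
 * vport 0.
 */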
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest;
        struct mlx5_flow_handle *flow_rule = NULL;
        struct mlx5_flow_spec *spec;
        int err = 0;

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec) {
                esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
                err = -ENOMEM;
                goto out;
        }

        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport_num = 0;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
                goto out;
        }

        esw->fdb_table.offloads.miss_rule = flow_rule;
out:
        kvfree(spec);
        return err;
}

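/* Create the offloads FDB: an auto-grouped fast-path table for offloaded
 * flows plus a slow-path table holding the send-to-vport group and the miss
 * group/rule. Tunnel encap/decap is enabled on the fast-path table when the
 * device supports both.
 */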
#define MAX_PF_SQ 256
#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */
#define ESW_OFFLOADS_NUM_GROUPS  4

static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_table *fdb = NULL;
        struct mlx5_flow_group *g;
        u32 *flow_group_in;
        void *match_criteria;
        int table_size, ix, err = 0;
        u32 flags = 0;

        flow_group_in = mlx5_vzalloc(inlen);
        if (!flow_group_in)
                return -ENOMEM;

        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!root_ns) {
                esw_warn(dev, "Failed to get FDB flow namespace\n");
                err = -EOPNOTSUPP;
                goto ns_err;
        }

        esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
                  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

        if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
            MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
                flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

        fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
                                                  ESW_OFFLOADS_NUM_ENTRIES,
                                                  ESW_OFFLOADS_NUM_GROUPS, 0,
                                                  flags);
        if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
                goto fast_fdb_err;
        }
        esw->fdb_table.fdb = fdb;

        table_size = nvports + MAX_PF_SQ + 1;
        fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0, 0);
        if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
                goto slow_fdb_err;
        }
        esw->fdb_table.offloads.fdb = fdb;

        /* create send-to-vport group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_MISC_PARAMETERS);

        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

        MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

        ix = nvports + MAX_PF_SQ;
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
                goto send_vport_err;
        }
        esw->fdb_table.offloads.send_to_vport_grp = g;

        /* create miss group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);

        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
                goto miss_err;
        }
        esw->fdb_table.offloads.miss_grp = g;

        err = esw_add_fdb_miss_rule(esw);
        if (err)
                goto miss_rule_err;

        return 0;

miss_rule_err:
        mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
        mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
        mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
        mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
        kvfree(flow_group_in);
        return err;
}

static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
{
        if (!esw->fdb_table.fdb)
                return;

        esw_debug(esw->dev, "Destroy offloads FDB Table\n");
        mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

        mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
        mlx5_destroy_flow_table(esw->fdb_table.fdb);
}

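/* Create the NIC-side offloads table that holds the per-vport RX rules
 * steering representor-bound traffic into TIRs, sized for one rule per VF
 * plus two extra vports.
 */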
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
        struct mlx5_flow_namespace *ns;
        struct mlx5_flow_table *ft_offloads;
        struct mlx5_core_dev *dev = esw->dev;
        int err = 0;

        ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
        if (!ns) {
                esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
                return -ENOMEM;
        }

        ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
        if (IS_ERR(ft_offloads)) {
                err = PTR_ERR(ft_offloads);
                esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
                return err;
        }

        esw->offloads.ft_offloads = ft_offloads;
        return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;

        mlx5_destroy_flow_table(offloads->ft_offloads);
}

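/* Create the single flow group of the offloads table; rules in it match
 * only on the misc source_port field.
 */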
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        struct mlx5_priv *priv = &esw->dev->priv;
        u32 *flow_group_in;
        void *match_criteria, *misc;
        int err = 0;
        int nvports = priv->sriov.num_vfs + 2;

        flow_group_in = mlx5_vzalloc(inlen);
        if (!flow_group_in)
                return -ENOMEM;

        /* create vport rx group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_MISC_PARAMETERS);

        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
        misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

        g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
                goto out;
        }

        esw->offloads.vport_rx_group = g;
out:
        kvfree(flow_group_in);
        return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
        mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

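/* Install an RX rule steering traffic whose eswitch source is the given
 * vport into the representor's TIR.
 */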
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest;
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;
        void *misc;

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec) {
                esw_warn(esw->dev, "Failed to alloc match parameters\n");
                flow_rule = ERR_PTR(-ENOMEM);
                goto out;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_port, vport);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        dest.tir_num = tirn;

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
                goto out;
        }

out:
        kvfree(spec);
        return flow_rule;
}

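/* Switch the eswitch from legacy to offloads mode by re-enabling SRIOV in
 * SRIOV_OFFLOADS mode, falling back to legacy on failure, and settle on a
 * single min-inline mode shared by all vports.
 */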
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
        int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

        if (esw->mode != SRIOV_LEGACY) {
                esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
                return -EINVAL;
        }

        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
        if (err) {
                esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
                err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
                if (err1)
                        esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
        }
        if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
                if (mlx5_eswitch_inline_mode_get(esw,
                                                 num_vfs,
                                                 &esw->offloads.inline_mode)) {
                        esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
                        esw_warn(esw->dev, "Inline mode is different between vports\n");
                }
        }
        return err;
}

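/* Bring up offloads mode: create the FDB and offloads tables plus the RX
 * group, load every registered representor, and detach the IB (RoCE)
 * interface so missed packets are not diverted by RoCE steering.
 */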
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
        struct mlx5_eswitch_rep *rep;
        int vport;
        int err;

        err = esw_create_offloads_fdb_table(esw, nvports);
        if (err)
                return err;

        err = esw_create_offloads_table(esw);
        if (err)
                goto create_ft_err;

        err = esw_create_vport_rx_group(esw);
        if (err)
                goto create_fg_err;

        for (vport = 0; vport < nvports; vport++) {
                rep = &esw->offloads.vport_reps[vport];
                if (!rep->valid)
                        continue;

                err = rep->load(esw, rep);
                if (err)
                        goto err_reps;
        }

        /* disable PF RoCE so missed packets don't go through RoCE steering */
        mlx5_dev_list_lock();
        mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
        mlx5_dev_list_unlock();

        return 0;

err_reps:
        for (vport--; vport >= 0; vport--) {
                rep = &esw->offloads.vport_reps[vport];
                if (!rep->valid)
                        continue;
                rep->unload(esw, rep);
        }
        esw_destroy_vport_rx_group(esw);

create_fg_err:
        esw_destroy_offloads_table(esw);

create_ft_err:
        esw_destroy_offloads_fdb_table(esw);
        return err;
}

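/* Return to legacy mode: re-attach the IB (RoCE) interface and re-enable
 * SRIOV in SRIOV_LEGACY mode, falling back to offloads if that fails.
 */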
static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
        int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

        /* enable back PF RoCE */
        mlx5_dev_list_lock();
        mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
        mlx5_dev_list_unlock();

        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
        if (err) {
                esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
                err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
                if (err1)
                        esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
        }

        return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
        struct mlx5_eswitch_rep *rep;
        int vport;

        for (vport = 0; vport < nvports; vport++) {
                rep = &esw->offloads.vport_reps[vport];
                if (!rep->valid)
                        continue;
                rep->unload(esw, rep);
        }

        esw_destroy_vport_rx_group(esw);
        esw_destroy_offloads_table(esw);
        esw_destroy_offloads_fdb_table(esw);
}

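/* Translate between the devlink eswitch/inline mode enums and the driver's
 * internal SRIOV/inline mode values.
 */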
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
        switch (mode) {
        case DEVLINK_ESWITCH_MODE_LEGACY:
                *mlx5_mode = SRIOV_LEGACY;
                break;
        case DEVLINK_ESWITCH_MODE_SWITCHDEV:
                *mlx5_mode = SRIOV_OFFLOADS;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
        switch (mlx5_mode) {
        case SRIOV_LEGACY:
                *mode = DEVLINK_ESWITCH_MODE_LEGACY;
                break;
        case SRIOV_OFFLOADS:
                *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
        switch (mode) {
        case DEVLINK_ESWITCH_INLINE_MODE_NONE:
                *mlx5_mode = MLX5_INLINE_MODE_NONE;
                break;
        case DEVLINK_ESWITCH_INLINE_MODE_LINK:
                *mlx5_mode = MLX5_INLINE_MODE_L2;
                break;
        case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
                *mlx5_mode = MLX5_INLINE_MODE_IP;
                break;
        case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
                *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
        switch (mlx5_mode) {
        case MLX5_INLINE_MODE_NONE:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
                break;
        case MLX5_INLINE_MODE_L2:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
                break;
        case MLX5_INLINE_MODE_IP:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
                break;
        case MLX5_INLINE_MODE_TCP_UDP:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

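/* devlink eswitch mode get/set callbacks: supported only on devices that
 * manage vport groups and only once SRIOV has been enabled.
 */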
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
        struct mlx5_core_dev *dev;
        u16 cur_mlx5_mode, mlx5_mode = 0;

        dev = devlink_priv(devlink);

        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;

        cur_mlx5_mode = dev->priv.eswitch->mode;

        if (cur_mlx5_mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        if (esw_mode_from_devlink(mode, &mlx5_mode))
                return -EINVAL;

        if (cur_mlx5_mode == mlx5_mode)
                return 0;

        if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
                return esw_offloads_start(dev->priv.eswitch);
        else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
                return esw_offloads_stop(dev->priv.eswitch);
        else
                return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
        struct mlx5_core_dev *dev;

        dev = devlink_priv(devlink);

        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;

        if (dev->priv.eswitch->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

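/* devlink min-inline get/set callbacks: apply the requested mode to every
 * VF vport, reverting the ones already changed if any update fails.
 */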
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        int num_vports = esw->enabled_vports;
        int err;
        int vport;
        u8 mlx5_mode;

        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;

        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
            MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
                return -EOPNOTSUPP;

        err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
        if (err)
                goto out;

        for (vport = 1; vport < num_vports; vport++) {
                err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
                if (err) {
                        esw_warn(dev, "Failed to set min inline on vport %d\n",
                                 vport);
                        goto revert_inline_mode;
                }
        }

        esw->offloads.inline_mode = mlx5_mode;
        return 0;

revert_inline_mode:
        while (--vport > 0)
                mlx5_modify_nic_vport_min_inline(dev,
                                                 vport,
                                                 esw->offloads.inline_mode);
out:
        return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;

        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;

        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
            MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
                return -EOPNOTSUPP;

        return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

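/* Query the min-inline mode of all VF vports; succeeds only if they agree,
 * returning -EINVAL when the vports are configured differently.
 */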
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
        struct mlx5_core_dev *dev = esw->dev;
        int vport;
        u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;

        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;

        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
            MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
                return -EOPNOTSUPP;

        for (vport = 1; vport <= nvfs; vport++) {
                mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
                if (vport > 1 && prev_mlx5_mode != mlx5_mode)
                        return -EINVAL;
                prev_mlx5_mode = mlx5_mode;
        }

        *mode = mlx5_mode;
        return 0;
}

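/* Register (or replace) the representor bound to a vport slot and mark it
 * valid so offloads init/cleanup will load and unload it.
 */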
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
                                     int vport_index,
                                     struct mlx5_eswitch_rep *__rep)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;

        rep = &offloads->vport_reps[vport_index];

        memset(rep, 0, sizeof(*rep));

        rep->load = __rep->load;
        rep->unload = __rep->unload;
        rep->vport = __rep->vport;
        rep->netdev = __rep->netdev;
        ether_addr_copy(rep->hw_id, __rep->hw_id);

        INIT_LIST_HEAD(&rep->vport_sqs_list);
        rep->valid = true;
}

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
                                       int vport_index)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;

        rep = &offloads->vport_reps[vport_index];

        if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
                rep->unload(esw, rep);

        rep->valid = false;
}

struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
{
#define UPLINK_REP_INDEX 0
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;

        rep = &offloads->vport_reps[UPLINK_REP_INDEX];
        return rep->netdev;
}