drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

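/* Add an offloaded rule to the fast path FDB table. The rule matches on
 * the in_rep source vport plus whatever headers the caller put in @spec,
 * and forwards to the out_rep vport and/or a flow counter per @attr.
 * VLAN push/pop is emulated through the vport context, so those action
 * bits are masked out before programming the FTE.
 */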
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i = 0;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	/* per flow vlan pop/push is emulated, don't set that into the firmware */
	flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
					   MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport_num = attr->out_rep->vport;
		i++;
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = counter;
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
				      MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (attr->encap)
		flow_act.encap_id = attr->encap->encap_id;

	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
				   spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		mlx5_fc_destroy(esw->dev, counter);

	return rule;
}

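/* Delete a rule created by mlx5_eswitch_add_offloaded_rule() and release
 * the flow counter attached to it, if any.
 */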
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_fc *counter = NULL;

	if (!IS_ERR(rule)) {
		counter = mlx5_flow_rule_counter(rule);
		mlx5_del_flow_rules(rule);
		mlx5_fc_destroy(esw->dev, counter);
	}
}

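/* Apply a vlan strip (or none) policy to all enabled VF vports; used when
 * the first vlan push/pop rule is installed and when the last one is
 * removed.
 */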
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

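/* A vlan push is programmed on the ingress (in_rep) vport and a vlan pop
 * on the egress (out_rep) vport; pick the representor whose vport context
 * the emulated action will be applied to.
 */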
static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

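/* Reject vlan action combinations the emulation can't honor, e.g. push or
 * pop without forwarding, or push/pop on the uplink vport.
 */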
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

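/* Account for and program the emulated vlan push/pop actions of a rule.
 * attr->vlan_handled is set on success so the del path knows whether this
 * rule's vlan state needs to be unwound.
 */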
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan;
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

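/* Undo the vlan accounting done in mlx5_eswitch_add_vlan_action(); once
 * the last push/pop rule is gone, the global vlan pop policy is lifted.
 */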
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

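/* Add a slow path rule matching traffic by source SQ number and steering
 * it to the given vport, so packets the PF sends on a representor SQ are
 * re-injected to the vport that representor stands for.
 */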
static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}

void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5_esw_sq *esw_sq, *tmp;

	if (esw->mode != SRIOV_OFFLOADS)
		return;

	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
		list_del(&esw_sq->list);
		kfree(esw_sq);
	}
}

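/* Install a send-to-vport re-inject rule for each SQ in @sqns_array; on
 * any failure, tear down the rules added so far.
 */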
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u16 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_esw_sq *esw_sq;
	int err;
	int i;

	if (esw->mode != SRIOV_OFFLOADS)
		return 0;

	for (i = 0; i < sqns_num; i++) {
		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
		if (!esw_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(esw_sq);
			goto out_err;
		}
		esw_sq->send_to_vport_rule = flow_rule;
		list_add(&esw_sq->list, &rep->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5_eswitch_sqs2vport_stop(esw, rep);
	return err;
}

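/* Install the FDB miss rule: an empty match in the slow path table that
 * forwards any packet no offloaded rule claimed to vport 0, i.e. the PF,
 * where the representor datapath picks it up.
 */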
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		err = -ENOMEM;
		goto out;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule = flow_rule;
out:
	kvfree(spec);
	return err;
}

#define MAX_PF_SQ 256
#define ESW_OFFLOADS_NUM_GROUPS  4

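/* Create the offloads mode FDB tables: an auto-grouped fast path table for
 * offloaded rules, sized as the min of the max flow table size and the
 * device's flow counters times the number of groups, and a slow path table
 * holding the send-to-vport group (nvports + MAX_PF_SQ entries) plus the
 * miss group.
 */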
static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int table_size, ix, esw_size, err = 0;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	void *match_criteria;
	u32 flags = 0;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS);

	esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS,
			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto fast_fdb_err;
	}
	esw->fdb_table.fdb = fdb;

	table_size = nvports + MAX_PF_SQ + 1;
	fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0, 0);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Table\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
}

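/* Create the NIC rx table used in offloads mode to steer packets coming
 * from the e-switch to the representors; sized at num_vfs + 2, which
 * appears to cover one entry per VF plus the PF and uplink vports.
 */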
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_core_dev *dev = esw->dev;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

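/* Create the single flow group covering the offloads rx table; entries in
 * it match only on the e-switch source port.
 */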
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	/* buffer was allocated with mlx5_vzalloc(), so free with kvfree() */
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

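/* Add a rule to the offloads rx table steering packets whose e-switch
 * source port is @vport to the given TIR; this is how a representor
 * netdevice receives the traffic of the vport it represents.
 */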
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

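/* Switch from legacy to offloads (switchdev) mode by re-enabling SRIOV in
 * the new mode, falling back to legacy on failure.  The initial inline
 * mode is resolved from the current vport settings when none was set.
 */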
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			esw_warn(esw->dev, "Inline mode is different between vports\n");
		}
	}
	return err;
}

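/* Bring up offloads mode: create the FDB tables, the rx table and its flow
 * group, then load every registered representor.  PF RoCE is removed first
 * so that missed packets reach the PF vport instead of RoCE steering.
 */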
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	/* disable PF RoCE so missed packets don't go through RoCE steering */
	mlx5_dev_list_lock();
	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	err = esw_create_offloads_fdb_table(esw, nvports);
	if (err)
		goto create_fdb_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;

		err = rep->load(esw, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	for (vport--; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_table(esw);

create_fdb_err:
	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
	}

	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}

	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_table(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

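/* devlink eswitch mode callbacks: translate between the devlink modes
 * (legacy/switchdev) and the driver's SRIOV modes, and start or stop
 * offloads accordingly.
 */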
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev;
	u16 cur_mlx5_mode, mlx5_mode = 0;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (cur_mlx5_mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int num_vports = esw->enabled_vports;
	int err;
	int vport;
	u8 mlx5_mode;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return -EOPNOTSUPP;

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < num_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			esw_warn(dev, "Failed to set min inline on vport %d\n",
				 vport);
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return -EOPNOTSUPP;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

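/* Query the min inline mode of all VF vports; fails with -EINVAL when the
 * vports disagree, since a single mode has to be reported.
 */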
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	struct mlx5_core_dev *dev = esw->dev;
	int vport;
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return -EOPNOTSUPP;

	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

	*mode = mlx5_mode;
	return 0;
}

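/* Register a vport representor.  A rep marked valid is loaded when
 * offloads mode is brought up and unloaded when it is unregistered or
 * offloads mode is torn down.
 */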
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep *__rep)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	memset(rep, 0, sizeof(*rep));

	rep->load   = __rep->load;
	rep->unload = __rep->unload;
	rep->vport  = __rep->vport;
	rep->netdev = __rep->netdev;
	ether_addr_copy(rep->hw_id, __rep->hw_id);

	INIT_LIST_HEAD(&rep->vport_sqs_list);
	rep->valid = true;
}

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->unload(esw, rep);

	rep->valid = false;
}

struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->netdev;
}