/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

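/* Offload a flow into the fast path FDB: optionally add a vport forward
 * destination and a flow counter, match on the source vport of the
 * originating representor, and program any decap/encap/header rewrite
 * actions that were requested.
 */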
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i = 0;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	/* per flow vlan pop/push is emulated, don't set that into the firmware */
	flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport_num = attr->out_rep->vport;
		i++;
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter)) {
			rule = ERR_CAST(counter);
			goto err_counter_alloc;
		}
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = counter;
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
				      MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		flow_act.encap_id = attr->encap->encap_id;

	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
				   spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	mlx5_fc_destroy(esw->dev, counter);
err_counter_alloc:
	return rule;
}

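/* Tear down a rule added via mlx5_eswitch_add_offloaded_rule() and
 * release its flow counter, if one was attached.
 */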
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);
	mlx5_del_flow_rules(rule);
	mlx5_fc_destroy(esw->dev, counter);
	esw->offloads.num_flows--;
}

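/* Apply the given vlan strip setting (SET_VLAN_STRIP or 0) on every
 * enabled VF vport that has a registered representor.
 */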
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

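/* vlan push is applied on the ingress (in_rep) vport, vlan pop on the
 * egress (out_rep) vport; with neither, default to the ingress vport.
 */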
static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

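/* Reject vlan push/pop combinations the emulation can't offload:
 * push/pop without forwarding, push from or pop towards the uplink,
 * and conflicting vlans pushed on the same ingress vport.
 */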
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

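/* Emulate per-flow vlan push/pop: the first vlan rule turns on the
 * global vlan strip policy, and push is programmed once per ingress
 * vport and reference counted across rules that share it.
 */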
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan;
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

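/* Undo the bookkeeping done in mlx5_eswitch_add_vlan_action(); when the
 * last vlan rule goes away, clear the push config and the global vlan
 * pop policy.
 */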
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

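/* Add an FDB rule matching traffic sent from the given PF send queue
 * (sqn) and forwarding it to the given vport, so a representor can
 * inject packets towards the VF it represents.
 */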
static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}

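/* Remove all send-to-vport rules installed for a representor's send
 * queues and free the tracking list entries.
 */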
void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5_esw_sq *esw_sq, *tmp;

	if (esw->mode != SRIOV_OFFLOADS)
		return;

	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
		list_del(&esw_sq->list);
		kfree(esw_sq);
	}
}

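/* Install one send-to-vport rule per representor send queue; on any
 * failure, roll back the rules installed so far.
 */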
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u16 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_esw_sq *esw_sq;
	int err;
	int i;

	if (esw->mode != SRIOV_OFFLOADS)
		return 0;

	for (i = 0; i < sqns_num; i++) {
		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
		if (!esw_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(esw_sq);
			goto out_err;
		}
		esw_sq->send_to_vport_rule = flow_rule;
		list_add(&esw_sq->list, &rep->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5_eswitch_sqs2vport_stop(esw, rep);
	return err;
}

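/* Install the catch-all rule of the miss group: the spec is left
 * zeroed so it matches everything, and FDB misses are forwarded to
 * vport 0, i.e. the PF, for slow path handling.
 */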
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		err = -ENOMEM;
		goto out;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule = flow_rule;
out:
	kvfree(spec);
	return err;
}

#define MAX_PF_SQ 256
#define ESW_OFFLOADS_NUM_GROUPS 4

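/* Build the offloads FDB: an auto-grouped fast path table for offloaded
 * flows (sized by the device's flow counter and table size caps, with
 * tunnel encap/decap enabled when supported), plus a slow path table
 * holding the send-to-vport group and the miss group/rule.
 */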
static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	int table_size, ix, esw_size, err = 0;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	void *match_criteria;
	u32 flags = 0;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS);

	esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS,
			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto fast_fdb_err;
	}
	esw->fdb_table.fdb = fdb;

	table_size = nvports + MAX_PF_SQ + 1;

	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Table\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
}

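/* Create the offloads table in the offloads flow namespace; the vport
 * rx rules added later steer packets arriving from a given source
 * vport to the matching representor's TIR. Sized for all VFs plus two
 * extra entries.
 */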
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

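/* Create the single flow group of the offloads table, matching on the
 * misc source_port field only.
 */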
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in); /* allocated with mlx5_vzalloc(), must be kvfree'd */
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

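/* Add a rule to the offloads table directing traffic received from the
 * given source vport to the TIR of its representor netdevice.
 */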
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

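/* Switch the eswitch from legacy to offloads mode by re-enabling SRIOV
 * in SRIOV_OFFLOADS mode; on failure, fall back to legacy mode. Also
 * settle on a uniform vport inline mode if none was configured.
 */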
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			esw_warn(esw->dev, "Inline mode is different between vports\n");
		}
	}
	return err;
}

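/* Set up offloads mode: drop the PF RoCE device so miss traffic isn't
 * grabbed by RoCE steering, create the FDB tables, the offloads table
 * and its rx group, then load every registered representor.
 */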
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	/* disable PF RoCE so missed packets don't go through RoCE steering */
	mlx5_dev_list_lock();
	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	err = esw_create_offloads_fdb_table(esw, nvports);
	if (err)
		goto create_fdb_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;

		err = rep->load(esw, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	for (vport--; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_table(esw);

create_fdb_err:
	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}

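/* Switch back to legacy mode (falling back to offloads mode if that
 * fails) and restore the PF RoCE device.
 */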
static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
	}

	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}

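/* Inverse of esw_offloads_init(): unload all valid representors and
 * destroy the steering objects.
 */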
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}

	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_table(esw);
}

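/* Translate between the devlink eswitch mode/inline-mode constants and
 * the driver's internal ones.
 */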
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

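/* devlink 'eswitch mode' set/get callbacks. Switching modes tears down
 * and re-creates the eswitch, so it is only allowed while SRIOV is
 * enabled and the device is a vport group manager.
 */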
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev;
	u16 cur_mlx5_mode, mlx5_mode = 0;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (cur_mlx5_mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

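/* devlink 'eswitch inline-mode' set/get callbacks. The mode can only be
 * changed when the device requires per-vport inline configuration and
 * no offloaded flows are installed; on failure, already-updated vports
 * are reverted.
 */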
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int num_vports = esw->enabled_vports;
	int err;
	int vport;
	u8 mlx5_mode;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return -EOPNOTSUPP;

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set inline mode when flows are configured\n");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < num_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			esw_warn(dev, "Failed to set min inline on vport %d\n",
				 vport);
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return -EOPNOTSUPP;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

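/* Query the min inline mode of all VF vports; succeeds only if they all
 * agree, otherwise returns -EINVAL.
 */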
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	struct mlx5_core_dev *dev = esw->dev;
	int vport;
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return -EOPNOTSUPP;

	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

	*mode = mlx5_mode;
	return 0;
}

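/* Register a representor for a vport: record its load/unload callbacks,
 * vport number, netdev and hw address, and mark it valid.
 */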
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep *__rep)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	memset(rep, 0, sizeof(*rep));

	rep->load = __rep->load;
	rep->unload = __rep->unload;
	rep->vport = __rep->vport;
	rep->netdev = __rep->netdev;
	ether_addr_copy(rep->hw_id, __rep->hw_id);

	INIT_LIST_HEAD(&rep->vport_sqs_list);
	rep->valid = true;
}

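/* Invalidate a representor; if the eswitch is in offloads mode and the
 * vport is still enabled, unload the representor first.
 */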
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->unload(esw, rep);

	rep->valid = false;
}

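/* The uplink representor lives at index 0 of the vport_reps array. */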
struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->netdev;
}