/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

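/* Add an offloaded flow rule to the fast path FDB table. The rule matches
 * on the caller-provided spec plus the source vport of attr->in_rep, and
 * forwards to attr->out_rep's vport and/or a flow counter according to
 * attr->action. Per-flow vlan push/pop is emulated elsewhere (see
 * mlx5_eswitch_add_vlan_action), so those action bits are masked out
 * before the rule is programmed into the firmware.
 */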
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i = 0;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	/* per flow vlan pop/push is emulated, don't set that into the firmware */
	flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport_num = attr->out_rep->vport;
		i++;
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter)) {
			rule = ERR_CAST(counter);
			goto err_counter_alloc;
		}
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = counter;
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
				      MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		flow_act.encap_id = attr->encap_id;

	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
				   spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	mlx5_fc_destroy(esw->dev, counter);
err_counter_alloc:
	return rule;
}

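/* Remove a rule added by mlx5_eswitch_add_offloaded_rule and release the
 * flow counter that was attached to it, if any.
 */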
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);
	mlx5_del_flow_rules(rule);
	mlx5_fc_destroy(esw->dev, counter);
	esw->offloads.num_flows--;
}

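/* Apply a global vlan pop policy on all enabled VF vports that have a
 * registered ethernet representor. Used when the first vlan push/pop
 * rule is installed and when the last one is removed.
 */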
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->rep_if[REP_ETH].valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

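/* Validate that a vlan push/pop action can be offloaded for this flow:
 * push/pop must come with a forward action, push is not allowed from the
 * uplink and pop is not allowed towards the uplink, and a vport can only
 * have a single vlan id configured for push at a time.
 */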
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

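/* Account for and apply the emulated per-flow vlan push/pop actions.
 * The hardware applies vlan push/pop per vport rather than per flow, so
 * this relies on refcounting: the first vlan rule turns on the global
 * pop policy, and a push rule programs the vlan on the in_rep's vport.
 * attr->vlan_handled records whether this attr was accounted, so the
 * del path can undo exactly what was done here.
 */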
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan;
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

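/* Undo the vport vlan configuration and refcounting done by
 * mlx5_eswitch_add_vlan_action for this flow attr.
 */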
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

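/* Install a slow path rule matching on the given SQ number with source
 * vport 0, forwarding to the given vport. This is what lets traffic
 * injected by a representor netdev's send queues reach its VF.
 */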
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

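/* Install the FDB miss rules, which forward packets that hit no offloaded
 * rule to vport 0 so they can be handled in software. Two rules are used,
 * keyed on the multicast bit of the destination MAC: one for unicast and
 * one for multicast traffic.
 */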
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

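/* Create the fast path FDB table as an auto-grouped table in the FDB
 * namespace. Its size is the smaller of the max flow table size and the
 * number of flow counters the device exposes times the group count;
 * tunnel encap/decap support is enabled when the devlink encap mode
 * requests it.
 */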
static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int esw_size, err = 0;
	u32 flags = 0;
	u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto out;
	}

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS);

	esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.fdb = fdb;

out:
	return err;
}

static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

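/* Create the offloads FDB tables: the fast path table for offloaded
 * flows, and a slow path table holding the per-SQ send-to-vport rules
 * and the miss rules, with one flow group for each of the two rule
 * kinds.
 */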
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	err = esw_create_offloads_fast_fdb_table(esw);
	if (err)
		goto fast_fdb_err;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2;

	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
	esw_destroy_offloads_fast_fdb_table(esw);
}

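/* Create the offloads table used on the RX side, sized to the number of
 * VFs plus two entries, which leaves room for a rule per VF vport plus
 * the uplink.
 */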
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

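/* Create the single flow group of the offloads table, matching on the
 * misc source_port field so each vport can get its own rx rule.
 */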
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

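/* Add a rule that steers traffic whose source is the given vport to the
 * given TIR, used to bring representor traffic up the rx path.
 */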
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

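/* Switch the eswitch from legacy to offloads (switchdev) mode by
 * re-enabling SRIOV in offloads mode, falling back to legacy mode if
 * that fails. Also resolves the effective inline mode if none was set.
 */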
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			esw_warn(esw->dev, "Inline mode is different between vports\n");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_esw_offload *offloads;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN];
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vfs,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	offloads = &esw->offloads;
	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	for (vport = 0; vport < total_vfs; vport++) {
		rep = &offloads->vport_reps[vport];

		rep->vport = vport;
		ether_addr_copy(rep->hw_id, hw_id);
	}

	offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;

	return 0;
}

static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
					  u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = nvports - 1; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		rep->rep_if[rep_type].unload(rep);
	}
}

static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
}

static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
				       u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		err = rep->rep_if[rep_type].load(esw->dev, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	esw_offloads_unload_reps_type(esw, vport, rep_type);
	return err;
}

static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = esw_offloads_load_reps_type(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
	return err;
}

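/* Bring up offloads mode: create the FDB tables, the offloads rx table
 * and its vport rx group, then load the registered representors.
 */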
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	int err;

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		return err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	err = esw_offloads_load_reps(esw, nvports);
	if (err)
		goto err_reps;

	return 0;

err_reps:
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
	}

	/* enable back PF RoCE */
	mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	esw_offloads_unload_reps(esw, nvports);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return 0;
}

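/* devlink eswitch mode set callback - move the eswitch between legacy
 * and switchdev modes.
 */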
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

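/* devlink callback for setting the minimal inline mode. The mode can
 * only be changed when the device supports per vport context inline
 * configuration and no offloaded flows are installed; it is applied to
 * all enabled VF vports, rolling back on failure.
 */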
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		esw_warn(dev, "Inline mode can't be set\n");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set inline mode when flows are configured\n");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			esw_warn(dev, "Failed to set min inline on vport %d\n",
				 vport);
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

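/* devlink callback for setting the encapsulation mode. Changing it
 * requires recreating the fast path FDB table with (or without) tunnel
 * encap/decap enabled, so it is refused while offloaded flows exist.
 */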
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set encapsulation when flows are configured\n");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fast_fdb_table(esw);

	esw->offloads.encap = encap;
	err = esw_create_offloads_fast_fdb_table(esw);
	if (err) {
		esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fast_fdb_table(esw);
	}
	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

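/* Register a representor for the given vport index. The rep_if callbacks
 * are copied rather than referenced, and the rep is marked valid so the
 * load/unload paths will consider it.
 */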
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep_if *__rep_if,
				     u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep_if *rep_if;

	rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];

	rep_if->load = __rep_if->load;
	rep_if->unload = __rep_if->unload;
	rep_if->get_proto_dev = __rep_if->get_proto_dev;
	rep_if->priv = __rep_if->priv;

	rep_if->valid = true;
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index, u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->rep_if[rep_type].unload(rep);

	rep->rep_if[rep_type].valid = false;
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->rep_if[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 int vport,
				 u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	if (vport == FDB_UPLINK_VPORT)
		vport = UPLINK_REP_INDEX;

	rep = &offloads->vport_reps[vport];

	if (rep->rep_if[rep_type].valid &&
	    rep->rep_if[rep_type].get_proto_dev)
		return rep->rep_if[rep_type].get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						int vport)
{
	return &esw->offloads.vport_reps[vport];
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);