/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
        FDB_FAST_PATH = 0,
        FDB_SLOW_PATH
};

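/* Offload a flow rule into the fast-path FDB (or into the dedicated fwd
 * table when the rule also mirrors to extra vports). The rule matches on
 * the source vport of the originating representor, plus the owner vhca id
 * on merged-eswitch setups, and forwards to the requested destination
 * vports and/or a flow counter.
 */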
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_spec *spec,
                                struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_table *ft = NULL;
        struct mlx5_fc *counter = NULL;
        struct mlx5_flow_handle *rule;
        int j, i = 0;
        void *misc;

        if (esw->mode != SRIOV_OFFLOADS)
                return ERR_PTR(-EOPNOTSUPP);

        if (attr->mirror_count)
                ft = esw->fdb_table.offloads.fwd_fdb;
        else
                ft = esw->fdb_table.offloads.fast_fdb;

        flow_act.action = attr->action;
        /* if per flow vlan pop/push is emulated, don't set that into the firmware */
        if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
                flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
                                     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
        else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
                flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
                flow_act.vlan[0].vid = attr->vlan_vid[0];
                flow_act.vlan[0].prio = attr->vlan_prio[0];
                if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
                        flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
                        flow_act.vlan[1].vid = attr->vlan_vid[1];
                        flow_act.vlan[1].prio = attr->vlan_prio[1];
                }
        }

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                for (j = attr->mirror_count; j < attr->out_count; j++) {
                        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
                        dest[i].vport.num = attr->out_rep[j]->vport;
                        dest[i].vport.vhca_id =
                                MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
                        dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
                        i++;
                }
        }
        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(esw->dev, true);
                if (IS_ERR(counter)) {
                        rule = ERR_CAST(counter);
                        goto err_counter_alloc;
                }
                dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest[i].counter = counter;
                i++;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

        if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                MLX5_SET(fte_match_set_misc, misc,
                         source_eswitch_owner_vhca_id,
                         MLX5_CAP_GEN(attr->in_mdev, vhca_id));

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
        if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                MLX5_SET_TO_ONES(fte_match_set_misc, misc,
                                 source_eswitch_owner_vhca_id);

        if (attr->match_level == MLX5_MATCH_NONE)
                spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        else
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
                                              MLX5_MATCH_MISC_PARAMETERS;

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
                spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                flow_act.modify_id = attr->mod_hdr_id;

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
                flow_act.encap_id = attr->encap_id;

        rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i);
        if (IS_ERR(rule))
                goto err_add_rule;
        else
                esw->offloads.num_flows++;

        return rule;

err_add_rule:
        mlx5_fc_destroy(esw->dev, counter);
err_counter_alloc:
        return rule;
}

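/* First stage of a two-table mirror rule: forward to the mirror vports and
 * then chain into the fwd FDB table, where the rule installed by
 * mlx5_eswitch_add_offloaded_rule() above handles the primary destinations.
 */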
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
                          struct mlx5_flow_spec *spec,
                          struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_handle *rule;
        void *misc;
        int i;

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        for (i = 0; i < attr->mirror_count; i++) {
                dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
                dest[i].vport.num = attr->out_rep[i]->vport;
                dest[i].vport.vhca_id =
                        MLX5_CAP_GEN(attr->out_mdev[i], vhca_id);
                dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
        }
        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[i].ft = esw->fdb_table.offloads.fwd_fdb;
        i++;

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

        if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                MLX5_SET(fte_match_set_misc, misc,
                         source_eswitch_owner_vhca_id,
                         MLX5_CAP_GEN(attr->in_mdev, vhca_id));

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
        if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                MLX5_SET_TO_ONES(fte_match_set_misc, misc,
                                 source_eswitch_owner_vhca_id);

        if (attr->match_level == MLX5_MATCH_NONE)
                spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        else
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
                                              MLX5_MATCH_MISC_PARAMETERS;

        rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i);

        if (!IS_ERR(rule))
                esw->offloads.num_flows++;

        return rule;
}

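/* Tear down a rule created by one of the add helpers above, releasing its
 * flow counter (if any) and updating the offloaded-flows accounting.
 */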
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_handle *rule,
                                struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_fc *counter = NULL;

        counter = mlx5_flow_rule_counter(rule);
        mlx5_del_flow_rules(rule);
        mlx5_fc_destroy(esw->dev, counter);
        esw->offloads.num_flows--;
}

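/* Apply (or clear, when val is 0) the global vlan pop policy on every
 * enabled VF vport that has an ethernet representor registered.
 */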
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
        struct mlx5_eswitch_rep *rep;
        int vf_vport, err = 0;

        esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
        for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
                rep = &esw->offloads.vport_reps[vf_vport];
                if (!rep->rep_if[REP_ETH].valid)
                        continue;

                err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
                if (err)
                        goto out;
        }

out:
        return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
        struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

        in_rep  = attr->in_rep;
        out_rep = attr->out_rep[0];

        if (push)
                vport = in_rep;
        else if (pop)
                vport = out_rep;
        else
                vport = in_rep;

        return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
                                     bool push, bool pop, bool fwd)
{
        struct mlx5_eswitch_rep *in_rep, *out_rep;

        if ((push || pop) && !fwd)
                goto out_notsupp;

        in_rep  = attr->in_rep;
        out_rep = attr->out_rep[0];

        if (push && in_rep->vport == FDB_UPLINK_VPORT)
                goto out_notsupp;

        if (pop && out_rep->vport == FDB_UPLINK_VPORT)
                goto out_notsupp;

        /* vport has vlan push configured, can't offload VF --> wire rules w.o it */
        if (!push && !pop && fwd)
                if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
                        goto out_notsupp;

        /* protects against (1) setting rules with different vlans to push and
         * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
         */
        if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
                goto out_notsupp;

        return 0;

out_notsupp:
        return -EOPNOTSUPP;
}

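/* Emulate per-flow vlan push/pop on devices without native support: vlan
 * push becomes a static vport vlan (insert + strip) on the ingress
 * representor, vlan pop rides on the global strip policy, and both are
 * refcounted across the offloaded rules that rely on them.
 */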
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr)
{
        struct offloads_fdb *offloads = &esw->fdb_table.offloads;
        struct mlx5_eswitch_rep *vport = NULL;
        bool push, pop, fwd;
        int err = 0;

        /* nop if we're on the vlan push/pop non emulation mode */
        if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
                return 0;

        push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
        pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
        fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

        err = esw_add_vlan_action_check(attr, push, pop, fwd);
        if (err)
                return err;

        attr->vlan_handled = false;

        vport = esw_vlan_action_get_vport(attr, push, pop);

        if (!push && !pop && fwd) {
                /* tracks VF --> wire rules without vlan push action */
                if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) {
                        vport->vlan_refcount++;
                        attr->vlan_handled = true;
                }

                return 0;
        }

        if (!push && !pop)
                return 0;

        if (!(offloads->vlan_push_pop_refcount)) {
                /* it's the 1st vlan rule, apply global vlan pop policy */
                err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
                if (err)
                        goto out;
        }
        offloads->vlan_push_pop_refcount++;

        if (push) {
                if (vport->vlan_refcount)
                        goto skip_set_push;

                err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
                                                    SET_VLAN_INSERT | SET_VLAN_STRIP);
                if (err)
                        goto out;
                vport->vlan = attr->vlan_vid[0];
skip_set_push:
                vport->vlan_refcount++;
        }
out:
        if (!err)
                attr->vlan_handled = true;
        return err;
}

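/* Undo the vlan push/pop emulation state taken by
 * mlx5_eswitch_add_vlan_action(): drop the refcounts and remove the global
 * pop policy once the last vlan rule is gone.
 */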
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr)
{
        struct offloads_fdb *offloads = &esw->fdb_table.offloads;
        struct mlx5_eswitch_rep *vport = NULL;
        bool push, pop, fwd;
        int err = 0;

        /* nop if we're on the vlan push/pop non emulation mode */
        if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
                return 0;

        if (!attr->vlan_handled)
                return 0;

        push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
        pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
        fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

        vport = esw_vlan_action_get_vport(attr, push, pop);

        if (!push && !pop && fwd) {
                /* tracks VF --> wire rules without vlan push action */
                if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT)
                        vport->vlan_refcount--;

                return 0;
        }

        if (push) {
                vport->vlan_refcount--;
                if (vport->vlan_refcount)
                        goto skip_unset_push;

                vport->vlan = 0;
                err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
                                                    0, 0, SET_VLAN_STRIP);
                if (err)
                        goto out;
        }

skip_unset_push:
        offloads->vlan_push_pop_refcount--;
        if (offloads->vlan_push_pop_refcount)
                return 0;

        /* no more vlan rules, stop global vlan pop policy */
        err = esw_set_global_vlan_pop(esw, 0);

out:
        return err;
}

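/* Steer traffic that a representor transmits on one of its send queues
 * (matched by sqn, source vport 0) straight to the target vport via the
 * slow-path FDB table.
 */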
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;
        void *misc;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                flow_rule = ERR_PTR(-ENOMEM);
                goto out;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
        MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport.num = vport;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule))
                esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
        kvfree(spec);
        return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
        mlx5_del_flow_rules(rule);
}

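/* Install the slow-path miss rules: packets with no fast-path match are
 * sent to the uplink (vport 0). Two rules are used, one for unicast and
 * one for multicast (dmac multicast bit set) destination MACs.
 */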
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle *flow_rule = NULL;
        struct mlx5_flow_spec *spec;
        void *headers_c;
        void *headers_v;
        int err = 0;
        u8 *dmac_c;
        u8 *dmac_v;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                err = -ENOMEM;
                goto out;
        }

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                 outer_headers);
        dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
                              outer_headers.dmac_47_16);
        dmac_c[0] = 0x01;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport.num = 0;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
                goto out;
        }

        esw->fdb_table.offloads.miss_rule_uni = flow_rule;

        headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                 outer_headers);
        dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
                              outer_headers.dmac_47_16);
        dmac_v[0] = 0x01;
        flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
                mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
                goto out;
        }

        esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
        kvfree(spec);
        return err;
}

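/* The fast-path FDB is an auto-grouped table sized by the smaller of the
 * device's max flow-table size and the flow counters it can back; when the
 * device supports a separate fwd table (used for mirroring), the budget is
 * split between the two tables.
 */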
#define ESW_OFFLOADS_NUM_GROUPS  4

static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_table *fdb = NULL;
        int esw_size, err = 0;
        u32 flags = 0;
        u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
                                MLX5_CAP_GEN(dev, max_flow_counter_15_0);

        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!root_ns) {
                esw_warn(dev, "Failed to get FDB flow namespace\n");
                err = -EOPNOTSUPP;
                goto out_namespace;
        }

        esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
                  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
                  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS);

        esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
                         1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

        if (mlx5_esw_has_fwd_fdb(dev))
                esw_size >>= 1;

        if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
                flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

        fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
                                                  esw_size,
                                                  ESW_OFFLOADS_NUM_GROUPS, 0,
                                                  flags);
        if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
                goto out_namespace;
        }
        esw->fdb_table.offloads.fast_fdb = fdb;

        if (!mlx5_esw_has_fwd_fdb(dev))
                goto out_namespace;

        fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
                                                  esw_size,
                                                  ESW_OFFLOADS_NUM_GROUPS, 1,
                                                  flags);
        if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create fwd table err %d\n", err);
                goto out_ft;
        }
        esw->fdb_table.offloads.fwd_fdb = fdb;

        return err;

out_ft:
        mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
out_namespace:
        return err;
}

static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
        if (mlx5_esw_has_fwd_fdb(esw->dev))
                mlx5_destroy_flow_table(esw->fdb_table.offloads.fwd_fdb);
        mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

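/* The slow-path FDB carries the send-to-vport rules (one group sized for up
 * to MAX_SQ_NVPORTS SQs per vport plus MAX_PF_SQ PF SQs) followed by a
 * small miss group that funnels unmatched traffic to the uplink.
 */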
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_table *fdb = NULL;
        int table_size, ix, err = 0;
        struct mlx5_flow_group *g;
        void *match_criteria;
        u32 *flow_group_in;
        u8 *dmac;

        esw_debug(esw->dev, "Create offloads FDB Tables\n");
        flow_group_in = kvzalloc(inlen, GFP_KERNEL);
        if (!flow_group_in)
                return -ENOMEM;

        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!root_ns) {
                esw_warn(dev, "Failed to get FDB flow namespace\n");
                err = -EOPNOTSUPP;
                goto ns_err;
        }

        err = esw_create_offloads_fast_fdb_table(esw);
        if (err)
                goto fast_fdb_err;

        table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2;

        ft_attr.max_fte = table_size;
        ft_attr.prio = FDB_SLOW_PATH;

        fdb = mlx5_create_flow_table(root_ns, &ft_attr);
        if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
                goto slow_fdb_err;
        }
        esw->fdb_table.offloads.slow_fdb = fdb;

        /* create send-to-vport group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_MISC_PARAMETERS);

        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

        MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

        ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
                goto send_vport_err;
        }
        esw->fdb_table.offloads.send_to_vport_grp = g;

        /* create miss group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_OUTER_HEADERS);
        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
                                      match_criteria);
        dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
                            outer_headers.dmac_47_16);
        dmac[0] = 0x01;

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);

        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
                goto miss_err;
        }
        esw->fdb_table.offloads.miss_grp = g;

        err = esw_add_fdb_miss_rule(esw);
        if (err)
                goto miss_rule_err;

        return 0;

miss_rule_err:
        mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
        mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
        mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
        esw_destroy_offloads_fast_fdb_table(esw);
fast_fdb_err:
ns_err:
        kvfree(flow_group_in);
        return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
        if (!esw->fdb_table.offloads.fast_fdb)
                return;

        esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
        mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
        mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

        mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
        esw_destroy_offloads_fast_fdb_table(esw);
}

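/* The offloads table (NIC rx) holds the per-vport rx rules that steer
 * traffic arriving for a representor into that representor's TIR.
 */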
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_table *ft_offloads;
        struct mlx5_flow_namespace *ns;
        int err = 0;

        ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
        if (!ns) {
                esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
                return -EOPNOTSUPP;
        }

        ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

        ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
        if (IS_ERR(ft_offloads)) {
                err = PTR_ERR(ft_offloads);
                esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
                return err;
        }

        esw->offloads.ft_offloads = ft_offloads;
        return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;

        mlx5_destroy_flow_table(offloads->ft_offloads);
}

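/* Single flow group for the offloads table, matching only on the misc
 * source_port field; one entry per vport (all VFs plus two).
 */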
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        struct mlx5_priv *priv = &esw->dev->priv;
        u32 *flow_group_in;
        void *match_criteria, *misc;
        int err = 0;
        int nvports = priv->sriov.num_vfs + 2;

        flow_group_in = kvzalloc(inlen, GFP_KERNEL);
        if (!flow_group_in)
                return -ENOMEM;

        /* create vport rx group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_MISC_PARAMETERS);

        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
        misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

        g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
                goto out;
        }

        esw->offloads.vport_rx_group = g;
out:
        kvfree(flow_group_in);
        return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
        mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

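/* Add the rx rule for one representor: traffic whose source vport matches
 * is delivered to the given TIR (tirn).
 */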
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;
        void *misc;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                flow_rule = ERR_PTR(-ENOMEM);
                goto out;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_port, vport);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        dest.tir_num = tirn;

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
                goto out;
        }

out:
        kvfree(spec);
        return flow_rule;
}

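/* Switching between legacy SRIOV and switchdev offloads (driven by the
 * devlink entry points below) is a full disable/re-enable of the eswitch;
 * on failure we attempt to fall back to the previous mode.
 */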
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
        int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

        if (esw->mode != SRIOV_LEGACY) {
                esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
                return -EINVAL;
        }

        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
        if (err) {
                esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
                err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
                if (err1)
                        esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
        }
        if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
                if (mlx5_eswitch_inline_mode_get(esw,
                                                 num_vfs,
                                                 &esw->offloads.inline_mode)) {
                        esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
                        esw_warn(esw->dev, "Inline mode is different between vports\n");
                }
        }
        return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
        kfree(esw->offloads.vport_reps);
}

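/* Allocate one representor slot per possible vport and stamp each with the
 * PF's MAC address; slot 0 is reserved for the uplink representor.
 */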
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
        int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_esw_offload *offloads;
        struct mlx5_eswitch_rep *rep;
        u8 hw_id[ETH_ALEN];
        int vport;

        esw->offloads.vport_reps = kcalloc(total_vfs,
                                           sizeof(struct mlx5_eswitch_rep),
                                           GFP_KERNEL);
        if (!esw->offloads.vport_reps)
                return -ENOMEM;

        offloads = &esw->offloads;
        mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

        for (vport = 0; vport < total_vfs; vport++) {
                rep = &offloads->vport_reps[vport];

                rep->vport = vport;
                ether_addr_copy(rep->hw_id, hw_id);
        }

        offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;

        return 0;
}

static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
                                          u8 rep_type)
{
        struct mlx5_eswitch_rep *rep;
        int vport;

        for (vport = nvports - 1; vport >= 0; vport--) {
                rep = &esw->offloads.vport_reps[vport];
                if (!rep->rep_if[rep_type].valid)
                        continue;

                rep->rep_if[rep_type].unload(rep);
        }
}

static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
{
        u8 rep_type = NUM_REP_TYPES;

        while (rep_type-- > 0)
                esw_offloads_unload_reps_type(esw, nvports, rep_type);
}

static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
                                       u8 rep_type)
{
        struct mlx5_eswitch_rep *rep;
        int vport;
        int err;

        for (vport = 0; vport < nvports; vport++) {
                rep = &esw->offloads.vport_reps[vport];
                if (!rep->rep_if[rep_type].valid)
                        continue;

                err = rep->rep_if[rep_type].load(esw->dev, rep);
                if (err)
                        goto err_reps;
        }

        return 0;

err_reps:
        esw_offloads_unload_reps_type(esw, vport, rep_type);
        return err;
}

static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
{
        u8 rep_type = 0;
        int err;

        for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
                err = esw_offloads_load_reps_type(esw, nvports, rep_type);
                if (err)
                        goto err_reps;
        }

        return err;

err_reps:
        while (rep_type-- > 0)
                esw_offloads_unload_reps_type(esw, nvports, rep_type);
        return err;
}

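/* Bring up switchdev offloads: FDB tables, the offloads (rx) table and its
 * vport group, then load all registered representors. Teardown happens in
 * reverse order in esw_offloads_cleanup().
 */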
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
        int err;

        err = esw_create_offloads_fdb_tables(esw, nvports);
        if (err)
                return err;

        err = esw_create_offloads_table(esw);
        if (err)
                goto create_ft_err;

        err = esw_create_vport_rx_group(esw);
        if (err)
                goto create_fg_err;

        err = esw_offloads_load_reps(esw, nvports);
        if (err)
                goto err_reps;

        return 0;

err_reps:
        esw_destroy_vport_rx_group(esw);

create_fg_err:
        esw_destroy_offloads_table(esw);

create_ft_err:
        esw_destroy_offloads_fdb_tables(esw);

        return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
        int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
        if (err) {
                esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
                err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
                if (err1)
                        esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
        }

        /* enable back PF RoCE */
        mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);

        return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
        esw_offloads_unload_reps(esw, nvports);
        esw_destroy_vport_rx_group(esw);
        esw_destroy_offloads_table(esw);
        esw_destroy_offloads_fdb_tables(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
        switch (mode) {
        case DEVLINK_ESWITCH_MODE_LEGACY:
                *mlx5_mode = SRIOV_LEGACY;
                break;
        case DEVLINK_ESWITCH_MODE_SWITCHDEV:
                *mlx5_mode = SRIOV_OFFLOADS;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
        switch (mlx5_mode) {
        case SRIOV_LEGACY:
                *mode = DEVLINK_ESWITCH_MODE_LEGACY;
                break;
        case SRIOV_OFFLOADS:
                *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
        switch (mode) {
        case DEVLINK_ESWITCH_INLINE_MODE_NONE:
                *mlx5_mode = MLX5_INLINE_MODE_NONE;
                break;
        case DEVLINK_ESWITCH_INLINE_MODE_LINK:
                *mlx5_mode = MLX5_INLINE_MODE_L2;
                break;
        case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
                *mlx5_mode = MLX5_INLINE_MODE_IP;
                break;
        case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
                *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
        switch (mlx5_mode) {
        case MLX5_INLINE_MODE_NONE:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
                break;
        case MLX5_INLINE_MODE_L2:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
                break;
        case MLX5_INLINE_MODE_IP:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
                break;
        case MLX5_INLINE_MODE_TCP_UDP:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);

        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return -EOPNOTSUPP;

        if (!MLX5_ESWITCH_MANAGER(dev))
                return -EPERM;

        if (dev->priv.eswitch->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        u16 cur_mlx5_mode, mlx5_mode = 0;
        int err;

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;

        cur_mlx5_mode = dev->priv.eswitch->mode;

        if (esw_mode_from_devlink(mode, &mlx5_mode))
                return -EINVAL;

        if (cur_mlx5_mode == mlx5_mode)
                return 0;

        if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
                return esw_offloads_start(dev->priv.eswitch);
        else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
                return esw_offloads_stop(dev->priv.eswitch);
        else
                return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        int err;

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;

        return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

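/* devlink 'inline-mode' knobs: the minimal headers a sender must copy into
 * the WQE inline segment. Only configurable when the device reports
 * per-vport-context inline mode, and never while offloaded flows exist.
 */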
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        int err, vport;
        u8 mlx5_mode;

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;

        switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
        case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
                if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
                        return 0;
                /* fall through */
        case MLX5_CAP_INLINE_MODE_L2:
                esw_warn(dev, "Inline mode can't be set\n");
                return -EOPNOTSUPP;
        case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
                break;
        }

        if (esw->offloads.num_flows > 0) {
                esw_warn(dev, "Can't set inline mode when flows are configured\n");
                return -EOPNOTSUPP;
        }

        err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
        if (err)
                goto out;

        for (vport = 1; vport < esw->enabled_vports; vport++) {
                err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
                if (err) {
                        esw_warn(dev, "Failed to set min inline on vport %d\n",
                                 vport);
                        goto revert_inline_mode;
                }
        }

        esw->offloads.inline_mode = mlx5_mode;
        return 0;

revert_inline_mode:
        while (--vport > 0)
                mlx5_modify_nic_vport_min_inline(dev,
                                                 vport,
                                                 esw->offloads.inline_mode);
out:
        return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        int err;

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;

        return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
        u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
        struct mlx5_core_dev *dev = esw->dev;
        int vport;

        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;

        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
        case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
                mlx5_mode = MLX5_INLINE_MODE_NONE;
                goto out;
        case MLX5_CAP_INLINE_MODE_L2:
                mlx5_mode = MLX5_INLINE_MODE_L2;
                goto out;
        case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
                goto query_vports;
        }

query_vports:
        for (vport = 1; vport <= nvfs; vport++) {
                mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
                if (vport > 1 && prev_mlx5_mode != mlx5_mode)
                        return -EINVAL;
                prev_mlx5_mode = mlx5_mode;
        }

out:
        *mode = mlx5_mode;
        return 0;
}

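/* devlink 'encap' knob: toggling tunnel encap support requires re-creating
 * the fast-path FDB with MLX5_FLOW_TABLE_TUNNEL_EN, so it is refused while
 * offloaded flows are installed.
 */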
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        int err;

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;

        if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
            (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
             !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
                return -EOPNOTSUPP;

        if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
                return -EOPNOTSUPP;

        if (esw->mode == SRIOV_LEGACY) {
                esw->offloads.encap = encap;
                return 0;
        }

        if (esw->offloads.encap == encap)
                return 0;

        if (esw->offloads.num_flows > 0) {
                esw_warn(dev, "Can't set encapsulation when flows are configured\n");
                return -EOPNOTSUPP;
        }

        esw_destroy_offloads_fast_fdb_table(esw);

        esw->offloads.encap = encap;
        err = esw_create_offloads_fast_fdb_table(esw);
        if (err) {
                esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
                esw->offloads.encap = !encap;
                (void)esw_create_offloads_fast_fdb_table(esw);
        }
        return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        int err;

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;

        *encap = esw->offloads.encap;
        return 0;
}

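/* Representor (un)registration API used by higher-level representor
 * drivers; a registered rep exposes its private data and protocol device
 * through the get_proto_dev helpers below.
 */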
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
                                     int vport_index,
                                     struct mlx5_eswitch_rep_if *__rep_if,
                                     u8 rep_type)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep_if *rep_if;

        rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];

        rep_if->load = __rep_if->load;
        rep_if->unload = __rep_if->unload;
        rep_if->get_proto_dev = __rep_if->get_proto_dev;
        rep_if->priv = __rep_if->priv;

        rep_if->valid = true;
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
                                       int vport_index, u8 rep_type)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;

        rep = &offloads->vport_reps[vport_index];

        if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
                rep->rep_if[rep_type].unload(rep);

        rep->rep_if[rep_type].valid = false;
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
#define UPLINK_REP_INDEX 0
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;

        rep = &offloads->vport_reps[UPLINK_REP_INDEX];
        return rep->rep_if[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
                                 int vport,
                                 u8 rep_type)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;

        if (vport == FDB_UPLINK_VPORT)
                vport = UPLINK_REP_INDEX;

        rep = &offloads->vport_reps[vport];

        if (rep->rep_if[rep_type].valid &&
            rep->rep_if[rep_type].get_proto_dev)
                return rep->rep_if[rep_type].get_proto_dev(rep);
        return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
        return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
                                                int vport)
{
        return &esw->offloads.vport_reps[vport];
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);