/* drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c */
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

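/* FDB_FAST_PATH and FDB_SLOW_PATH above are used as flow table priorities
 * within the FDB namespace: offloaded flow rules live in the fast path
 * table(s), while the slow path table holds the send-to-vport and miss
 * rules that steer unmatched traffic to the representors.
 */
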
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_table *ft = NULL;
	struct mlx5_flow_handle *rule;
	int j, i = 0;
	void *misc;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	if (attr->mirror_count)
		ft = esw->fdb_table.offloads.fwd_fdb;
	else
		ft = esw->fdb_table.offloads.fast_fdb;

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		for (j = attr->mirror_count; j < attr->out_count; j++) {
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
			dest[i].vport.num = attr->out_rep[j]->vport;
			dest[i].vport.vhca_id =
				MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
			dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
			i++;
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
		flow_act.reformat_id = attr->encap_id;

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto out;
	else
		esw->offloads.num_flows++;

out:
	return rule;
}

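/* Mirroring is split across two tables: the rule added here to the fast
 * FDB forwards to the mirror destinations (out_rep[0..mirror_count-1])
 * and chains to the fwd_fdb table, where the companion rule from
 * mlx5_eswitch_add_offloaded_rule() handles the remaining destinations.
 */
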
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *rule;
	void *misc;
	int i;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->mirror_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->out_rep[i]->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->out_mdev[i], vhca_id);
		dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = esw->fdb_table.offloads.fwd_fdb;
	i++;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i);

	if (!IS_ERR(rule))
		esw->offloads.num_flows++;

	return rule;
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	mlx5_del_flow_rules(rule);
	esw->offloads.num_flows--;
}

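/* Apply (or clear, for val == 0) the global vlan pop policy on every
 * enabled VF vport that has a registered ethernet representor.
 */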
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->rep_if[REP_ETH].valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

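/* The vport whose vlan state is affected by a push/pop rule: push acts on
 * the ingress (in_rep) vport, pop on the egress (out_rep) vport.
 */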
static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep[0];

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

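/* Reject vlan action combinations that can't be offloaded: push/pop
 * without forwarding, push from the uplink, pop towards the uplink, a
 * VF->wire rule while the ingress vport has vlan push configured, and
 * pushing a vid different from the one already pushed on this vport.
 */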
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep[0];

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

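/* Emulate per-flow vlan push/pop on devices that lack native FDB vlan
 * actions: the first vlan rule turns on the global pop (strip) policy,
 * and push is implemented by configuring vlan insertion on the ingress
 * vport, refcounted across rules.
 */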
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

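/* Undo the accounting done in mlx5_eswitch_add_vlan_action(): drop the
 * vport and global refcounts and, once the last vlan rule is gone, clear
 * the vport vlan insertion and the global pop policy.
 */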
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

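/* Add a slow path rule matching packets sent from the given SQ on vport 0
 * (the eswitch manager) and forwarding them to the vport, so that traffic
 * transmitted on a representor reaches its VF.
 */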
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

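/* Two miss rules are installed, both keying only on the multicast bit of
 * the destination MAC (dmac_c[0] = 0x01): the first (value 0) catches
 * unmatched unicast, the second (value 1) unmatched multicast, and both
 * forward to vport 0.
 */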
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

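/* Size the auto-grouped fast path table to the smaller of the device's
 * maximum flow table size (2^log_max_ft_size) and max_flow_counter *
 * ESW_OFFLOADS_NUM_GROUPS, halving it when a second (fwd) table is
 * carved out of the same FDB_FAST_PATH priority.
 */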
static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int esw_size, err = 0;
	u32 flags = 0;
	u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto out_namespace;
	}

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS);

	esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (mlx5_esw_has_fwd_fdb(dev))
		esw_size >>= 1;

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto out_namespace;
	}
	esw->fdb_table.offloads.fast_fdb = fdb;

	if (!mlx5_esw_has_fwd_fdb(dev))
		goto out_namespace;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 1,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create fwd table err %d\n", err);
		goto out_ft;
	}
	esw->fdb_table.offloads.fwd_fdb = fdb;

	return err;

out_ft:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
out_namespace:
	return err;
}

static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_has_fwd_fdb(esw->dev))
		mlx5_destroy_flow_table(esw->fdb_table.offloads.fwd_fdb);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

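/* Create the full offloads FDB: the fast path table(s) plus a slow path
 * table sized for nvports * MAX_SQ_NVPORTS + MAX_PF_SQ send-to-vport
 * rules and the miss entries behind them.
 */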
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	err = esw_create_offloads_fast_fdb_table(esw);
	if (err)
		goto fast_fdb_err;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2;

	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	esw_destroy_offloads_fast_fdb_table(esw);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.fast_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_table(esw);
}

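/* The offloads table sits in the NIC RX namespace (not the FDB); it holds
 * the per-vport rx rules added by mlx5_eswitch_create_vport_rx_rule()
 * below.
 */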
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

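/* Match the eswitch source_port metadata and forward to the destination
 * supplied by the caller, typically the rep's receive queues; the caller
 * owns the returned rule.
 */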
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

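/* Switching devlink mode to switchdev: SRIOV is disabled and re-enabled
 * in offloads mode with the same VF count; on failure we attempt to fall
 * back to legacy mode.
 */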
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

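/* One rep is allocated per vport; slot 0 is reused for the uplink, whose
 * vport id is overridden to FDB_UPLINK_VPORT after the loop.
 */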
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_esw_offload *offloads;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN];
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vfs,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	offloads = &esw->offloads;
	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	for (vport = 0; vport < total_vfs; vport++) {
		rep = &offloads->vport_reps[vport];

		rep->vport = vport;
		ether_addr_copy(rep->hw_id, hw_id);
	}

	offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;

	return 0;
}

static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
					  u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = nvports - 1; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		rep->rep_if[rep_type].unload(rep);
	}
}

static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
}

static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
				       u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		err = rep->rep_if[rep_type].load(esw->dev, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	esw_offloads_unload_reps_type(esw, vport, rep_type);
	return err;
}

static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = esw_offloads_load_reps_type(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
	return err;
}

int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	int err;

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		return err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	err = esw_offloads_load_reps(esw, nvports);
	if (err)
		goto err_reps;

	return 0;

err_reps:
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	/* enable back PF RoCE */
	mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	esw_offloads_unload_reps(esw, nvports);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return 0;
}

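/* devlink entry points for the eswitch mode. From userspace this is
 * driven by e.g. (the PCI address is illustrative):
 *
 *   devlink dev eswitch set pci/0000:06:00.0 mode switchdev
 */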
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch, extack);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fast_fdb_table(esw);

	esw->offloads.encap = encap;
	err = esw_create_offloads_fast_fdb_table(esw);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fast_fdb_table(esw);
	}
	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

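/* Rep consumers (e.g. the mlx5e representor code) register a
 * mlx5_eswitch_rep_if per vport and rep type; its load()/unload()
 * callbacks are invoked when the reps are brought up or torn down in
 * offloads mode.
 */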
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep_if *__rep_if,
				     u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep_if *rep_if;

	rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];

	rep_if->load = __rep_if->load;
	rep_if->unload = __rep_if->unload;
	rep_if->get_proto_dev = __rep_if->get_proto_dev;
	rep_if->priv = __rep_if->priv;

	rep_if->valid = true;
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index, u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->rep_if[rep_type].unload(rep);

	rep->rep_if[rep_type].valid = false;
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->rep_if[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 int vport,
				 u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	if (vport == FDB_UPLINK_VPORT)
		vport = UPLINK_REP_INDEX;

	rep = &offloads->vport_reps[vport];

	if (rep->rep_if[rep_type].valid &&
	    rep->rep_if[rep_type].get_proto_dev)
		return rep->rep_if[rep_type].get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						int vport)
{
	return &esw->offloads.vport_reps[vport];
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);