/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

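/* Add an offloaded flow rule to the FDB: match on the source vport of
 * the originating VF (plus outer/inner headers as requested), apply the
 * vlan/encap/decap/mod-hdr actions, and forward to the destination
 * vports and/or a flow counter collected in dest[].
 */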
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_table *ft = NULL;
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	int j, i = 0;
	void *misc;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	if (attr->mirror_count)
		ft = esw->fdb_table.offloads.fwd_fdb;
	else
		ft = esw->fdb_table.offloads.fast_fdb;

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		for (j = attr->mirror_count; j < attr->out_count; j++) {
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
			dest[i].vport.num = attr->out_rep[j]->vport;
			dest[i].vport.vhca_id =
				MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
			dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
			i++;
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter)) {
			rule = ERR_CAST(counter);
			goto err_counter_alloc;
		}
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = counter;
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		flow_act.encap_id = attr->encap_id;

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	mlx5_fc_destroy(esw->dev, counter);
err_counter_alloc:
	return rule;
}

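/* First leg of a mirrored flow: forward to the mirror vports and then
 * chain into fwd_fdb, where the rule added by
 * mlx5_eswitch_add_offloaded_rule() handles the original destinations.
 */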
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *rule;
	void *misc;
	int i;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->mirror_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->out_rep[i]->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->out_mdev[i], vhca_id);
		dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = esw->fdb_table.offloads.fwd_fdb;
	i++;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i);

	if (!IS_ERR(rule))
		esw->offloads.num_flows++;

	return rule;
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);
	mlx5_del_flow_rules(rule);
	mlx5_fc_destroy(esw->dev, counter);
	esw->offloads.num_flows--;
}

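/* Apply (or clear) the global vlan strip setting on every enabled VF
 * vport that has a valid ethernet representor; part of the vlan pop
 * emulation for devices without native per-flow vlan actions.
 */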
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->rep_if[REP_ETH].valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep[0];

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep[0];

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules without it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules without vlans (attr->vlan = 0) and with vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

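/* Emulate per-flow vlan push/pop using per-vport vlan configuration:
 * push maps to a vlan insert on the VF vport, pop to the global vlan
 * strip policy, with refcounts tracking how many offloaded rules
 * currently depend on each setting.
 */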
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nothing to do if vlan push/pop is supported natively (not emulated) */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nothing to do if vlan push/pop is supported natively (not emulated) */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

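/* Packets sent by a representor carry the SQ number of the representor's
 * send queue on vport 0; this rule steers them directly to the VF vport
 * the representor stands for.
 */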
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

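/* FDB miss rules catch packets that matched no offloaded flow: one rule
 * for unicast and one for multicast (dmac multicast bit set), both
 * forwarding to vport 0 so the traffic surfaces on the uplink/PF side.
 */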
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS 4

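/* The fast-path FDB is an auto-grouped table sized as the minimum of the
 * device's max flow-table size and max_flow_counter * number of groups;
 * when the device supports a separate fwd table (used for mirroring),
 * that budget is split between the two tables.
 */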
static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int esw_size, err = 0;
	u32 flags = 0;
	u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto out_namespace;
	}

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS);

	esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (mlx5_esw_has_fwd_fdb(dev))
		esw_size >>= 1;

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_ENCAP |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto out_namespace;
	}
	esw->fdb_table.offloads.fast_fdb = fdb;

	if (!mlx5_esw_has_fwd_fdb(dev))
		goto out_namespace;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 1,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create fwd table err %d\n", err);
		goto out_ft;
	}
	esw->fdb_table.offloads.fwd_fdb = fdb;

	return err;

out_ft:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
out_namespace:
	return err;
}

static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_has_fwd_fdb(esw->dev))
		mlx5_destroy_flow_table(esw->fdb_table.offloads.fwd_fdb);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

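/* The slow-path FDB holds one large group for the send-to-vport rules
 * (MAX_SQ_NVPORTS entries per vport plus MAX_PF_SQ for the PF) followed
 * by a small group for the unicast/multicast miss rules.
 */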
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	err = esw_create_offloads_fast_fdb_table(esw);
	if (err)
		goto fast_fdb_err;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2;

	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	esw_destroy_offloads_fast_fdb_table(esw);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.fast_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_table(esw);
}

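/* The offloads table in the NIC RX namespace holds the per-vport rules
 * (added by mlx5_eswitch_create_vport_rx_rule() below) that steer
 * incoming traffic to the matching representor's TIR; sized for one
 * entry per VF plus two extra entries.
 */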
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

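/* Steer traffic received from the given source vport to the TIR of its
 * representor, so the packets show up on the representor netdevice.
 */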
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

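/* Switching devlink modes tears the e-switch down and re-enables it in
 * the requested mode; on failure, an attempt is made to fall back to the
 * previous mode.
 */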
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			esw_warn(esw->dev, "Inline mode is different between vports\n");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_esw_offload *offloads;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN];
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vfs,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	offloads = &esw->offloads;
	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	for (vport = 0; vport < total_vfs; vport++) {
		rep = &offloads->vport_reps[vport];

		rep->vport = vport;
		ether_addr_copy(rep->hw_id, hw_id);
	}

	offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;

	return 0;
}

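/* A vport representor may expose several rep types (REP_ETH and other
 * protocols); the helpers below load or unload one rep type across all
 * vports, unloading in reverse order.
 */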
static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
					  u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = nvports - 1; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		rep->rep_if[rep_type].unload(rep);
	}
}

static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
}

static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
				       u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		err = rep->rep_if[rep_type].load(esw->dev, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	esw_offloads_unload_reps_type(esw, vport, rep_type);
	return err;
}

static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = esw_offloads_load_reps_type(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
	return err;
}

int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	int err;

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		return err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	err = esw_offloads_load_reps(esw, nvports);
	if (err)
		goto err_reps;

	return 0;

err_reps:
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
	}

	/* enable back PF RoCE */
	mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	esw_offloads_unload_reps(esw, nvports);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
}

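/* Translate between the devlink eswitch/inline-mode constants and their
 * mlx5-internal counterparts.
 */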
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

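/* Common validation for the devlink eswitch callbacks: an ethernet port,
 * an e-switch manager function and SRIOV enabled are all required.
 */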
static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

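/* The minimum inline mode is only configurable per vport context, can't
 * be changed while offloaded flows exist, and is rolled back on the
 * already-updated vports if any vport update fails.
 */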
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		esw_warn(dev, "Inline mode can't be set\n");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set inline mode when flows are configured\n");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			esw_warn(dev, "Failed to set min inline on vport %d\n",
				 vport);
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

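/* Changing the encap mode requires recreating the fast-path FDB with the
 * matching tunnel encap/decap flags, so it is refused while offloaded
 * flows are installed.
 */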
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set encapsulation when flows are configured\n");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fast_fdb_table(esw);

	esw->offloads.encap = encap;
	err = esw_create_offloads_fast_fdb_table(esw);
	if (err) {
		esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fast_fdb_table(esw);
	}
	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

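/* Registration API for protocol drivers (e.g. the mlx5e and mlx5_ib
 * representor code) to attach per-vport load/unload callbacks and
 * private data for a given rep type.
 */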
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep_if *__rep_if,
				     u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep_if *rep_if;

	rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];

	rep_if->load = __rep_if->load;
	rep_if->unload = __rep_if->unload;
	rep_if->get_proto_dev = __rep_if->get_proto_dev;
	rep_if->priv = __rep_if->priv;

	rep_if->valid = true;
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index, u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->rep_if[rep_type].unload(rep);

	rep->rep_if[rep_type].valid = false;
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->rep_if[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 int vport,
				 u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	if (vport == FDB_UPLINK_VPORT)
		vport = UPLINK_REP_INDEX;

	rep = &offloads->vport_reps[vport];

	if (rep->rep_if[rep_type].valid &&
	    rep->rep_if[rep_type].get_proto_dev)
		return rep->rep_if[rep_type].get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						int vport)
{
	return &esw->offloads.vport_reps[vport];
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);