/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

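/* Add an offloaded flow rule on the fast-path FDB. The rule matches on
 * the source vport of @attr->in_rep (plus the owner vhca_id on merged
 * eswitch setups) and forwards to the destination vports and/or flow
 * counter described by @attr. Rules that carry mirror destinations are
 * placed in the fwd table instead (see mlx5_eswitch_add_fwd_rule()).
 */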
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_table *ft = NULL;
	struct mlx5_flow_handle *rule;
	int j, i = 0;
	void *misc;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	if (attr->mirror_count)
		ft = esw->fdb_table.offloads.fwd_fdb;
	else
		ft = esw->fdb_table.offloads.fast_fdb;

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		for (j = attr->mirror_count; j < attr->out_count; j++) {
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
			dest[i].vport.num = attr->out_rep[j]->vport;
			dest[i].vport.vhca_id =
				MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
			dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
			i++;
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
		flow_act.reformat_id = attr->encap_id;

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto out;
	else
		esw->offloads.num_flows++;

out:
	return rule;
}

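/* First stage of a mirrored flow: forward a copy to each mirror vport
 * and chain to the fwd table, where the rule added by
 * mlx5_eswitch_add_offloaded_rule() steers the packet to its final
 * destination.
 */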
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *rule;
	void *misc;
	int i;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->mirror_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->out_rep[i]->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->out_mdev[i], vhca_id);
		dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = esw->fdb_table.offloads.fwd_fdb;
	i++;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i);

	if (!IS_ERR(rule))
		esw->offloads.num_flows++;

	return rule;
}

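/* Deletion is symmetric with the add calls above; the explicit fwd-rule
 * variant keeps the API paired even though it currently just delegates
 * to mlx5_eswitch_del_offloaded_rule().
 */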
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	mlx5_del_flow_rules(rule);
	esw->offloads.num_flows--;
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
}

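/* Apply (or clear) the global vlan pop policy on every enabled VF vport
 * that has a valid ethernet representor. Called when the first vlan
 * push/pop rule is offloaded and when the last one is removed.
 */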
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->rep_if[REP_ETH].valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep[0];

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

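/* Validate that a vlan push/pop action can be offloaded: push/pop
 * requires a forwarding action, push from the uplink and pop towards
 * the uplink are unsupported, and a vport may only have a single vlan
 * configured for push at a time.
 */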
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep[0];

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules without it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules without a vlan (attr->vlan = 0) mixed with rules
	 *     that push a vlan (attr->vlan != 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

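/* Emulate per-flow vlan push/pop on devices that only support it as
 * vport context configuration: push/pop users are refcounted per vport,
 * the vport vlan is programmed on first use, and the global pop policy
 * is applied when the first vlan rule appears.
 */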
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if the HW handles vlan push/pop natively (no emulation needed) */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if the HW handles vlan push/pop natively (no emulation needed) */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

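/* Steer packets that a representor transmits on send queue @sqn from
 * the PF (vport 0) to the represented vport, using a slow-path FDB rule
 * that matches on the source SQ number.
 */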
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

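/* Install the slow-path FDB miss rules: traffic that matches no
 * offloaded rule is forwarded to the PF (vport 0). Two rules are used,
 * keyed on the multicast bit of the destination MAC, one for unicast
 * and one for multicast misses.
 */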
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

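/* The fast-path FDB holds the offloaded flow rules. Its size is bounded
 * by the max flow table size and by the number of flow counters the
 * device exposes; when the device supports a separate fwd table (the
 * second stage used for mirroring), the size budget is split between
 * the two tables.
 */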
static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int esw_size, err = 0;
	u32 flags = 0;
	u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto out_namespace;
	}

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS);

	esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (mlx5_esw_has_fwd_fdb(dev))
		esw_size >>= 1;

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto out_namespace;
	}
	esw->fdb_table.offloads.fast_fdb = fdb;

	if (!mlx5_esw_has_fwd_fdb(dev))
		goto out_namespace;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 1,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create fwd table err %d\n", err);
		goto out_ft;
	}
	esw->fdb_table.offloads.fwd_fdb = fdb;

	return err;

out_ft:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
out_namespace:
	return err;
}

static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_has_fwd_fdb(esw->dev))
		mlx5_destroy_flow_table(esw->fdb_table.offloads.fwd_fdb);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

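/* Create the complete offloads FDB set: the fast-path table(s) above
 * plus the slow-path table holding the send-to-vport group (sized for
 * MAX_SQ_NVPORTS SQs per vport plus the PF SQs) and the miss group with
 * its two miss rules.
 */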
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	err = esw_create_offloads_fast_fdb_table(esw);
	if (err)
		goto fast_fdb_err;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2;

	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	esw_destroy_offloads_fast_fdb_table(esw);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.fast_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_table(esw);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

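/* The vport rx group matches on the source vport a packet came from,
 * with one flow index per vport; the num_vfs + 2 sizing mirrors the
 * offloads table created above.
 */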
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

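/* Add the rule that delivers packets arriving from @vport into the
 * caller-supplied destination (in practice, the representor's TIR).
 */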
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

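/* Switch the eswitch from legacy to offloads (switchdev) mode by
 * re-enabling SRIOV in the new mode; on failure, fall back to legacy so
 * the device is not left without a working eswitch.
 */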
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_esw_offload *offloads;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN];
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vfs,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	offloads = &esw->offloads;
	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	for (vport = 0; vport < total_vfs; vport++) {
		rep = &offloads->vport_reps[vport];

		rep->vport = vport;
		ether_addr_copy(rep->hw_id, hw_id);
	}

	offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;

	return 0;
}

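/* Representors are loaded and unloaded per rep type (e.g. REP_ETH).
 * Loading walks the vports in ascending order and unloading in reverse,
 * so a partial failure can be unwound cleanly.
 */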
static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
					  u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = nvports - 1; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		rep->rep_if[rep_type].unload(rep);
	}
}

static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
}

static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
				       u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		err = rep->rep_if[rep_type].load(esw->dev, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	esw_offloads_unload_reps_type(esw, vport, rep_type);
	return err;
}

static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = esw_offloads_load_reps_type(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
	return err;
}

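/* Bring up offloads mode: FDB tables, the offloads table with its vport
 * rx group, then the representors; teardown runs in reverse order on
 * any failure.
 */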
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	int err;

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		return err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	err = esw_offloads_load_reps(esw, nvports);
	if (err)
		goto err_reps;

	return 0;

err_reps:
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	/* enable back PF RoCE */
	mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	esw_offloads_unload_reps(esw, nvports);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return 0;
}

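/* devlink 'eswitch mode' set/get: translate between the devlink and
 * mlx5 mode enums and start or stop offloads accordingly.
 */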
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch, extack);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

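/* devlink 'eswitch inline-mode' set: only meaningful when the device
 * takes the inline mode from the vport context, and refused while
 * offloaded flows exist; the mode is applied to every enabled VF vport,
 * with rollback if any vport fails.
 */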
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

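/* devlink 'eswitch encap' set: toggling encap support requires
 * re-creating the fast-path FDB with matching tunnel flags, so it is
 * refused while offloaded flows exist.
 */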
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fast_fdb_table(esw);

	esw->offloads.encap = encap;
	err = esw_create_offloads_fast_fdb_table(esw);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fast_fdb_table(esw);
	}
	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

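/* Register/unregister a representor interface for one vport and rep
 * type. The rep is marked valid only after all callbacks are copied; on
 * unregister it is unloaded first if offloads mode is active and the
 * vport is enabled.
 */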
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep_if *__rep_if,
				     u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep_if *rep_if;

	rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];

	rep_if->load = __rep_if->load;
	rep_if->unload = __rep_if->unload;
	rep_if->get_proto_dev = __rep_if->get_proto_dev;
	rep_if->priv = __rep_if->priv;

	rep_if->valid = true;
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index, u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->rep_if[rep_type].unload(rep);

	rep->rep_if[rep_type].valid = false;
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->rep_if[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 int vport,
				 u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	if (vport == FDB_UPLINK_VPORT)
		vport = UPLINK_REP_INDEX;

	rep = &offloads->vport_reps[vport];

	if (rep->rep_if[rep_type].valid &&
	    rep->rep_if[rep_type].get_proto_dev)
		return rep->rep_if[rep_type].get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						int vport)
{
	return &esw->offloads.vport_reps[vport];
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);