1/*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/etherdevice.h>
34#include <linux/mlx5/driver.h>
35#include <linux/mlx5/mlx5_ifc.h>
36#include <linux/mlx5/vport.h>
37#include <linux/mlx5/fs.h>
38#include "mlx5_core.h"
39#include "eswitch.h"
40#include "rdma.h"
41#include "en.h"
42#include "fs_core.h"
43#include "lib/devcom.h"
44#include "lib/eq.h"
45
46/* There are two match-all miss flows, one for unicast dst mac and
47 * one for multicast.
48 */
49#define MLX5_ESW_MISS_FLOWS (2)
50
51#define fdb_prio_table(esw, chain, prio, level) \
52 (esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]
53
54#define UPLINK_REP_INDEX 0
55
56static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
57 u16 vport_num)
58{
59 u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
60
61 WARN_ON(idx > esw->total_vports - 1);
62 return &esw->offloads.vport_reps[idx];
63}
64
65static struct mlx5_flow_table *
66esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
67static void
68esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
69
70bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
71{
72 return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
73}
74
75u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
76{
77 if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
78 return FDB_MAX_CHAIN;
79
80 return 0;
81}
82
83u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
84{
85 if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
86 return FDB_MAX_PRIO;
87
88 return 1;
89}
90
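/* Add an offloaded flow rule to the FDB: resolve the destination(s)
 * (goto-chain table or forward vports with optional encap), set the VLAN,
 * counter and modify-header actions, match on the source port (and source
 * eswitch vhca_id on merged-eswitch devices), then insert the rule into the
 * table for attr->chain/attr->prio.
 */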
91struct mlx5_flow_handle *
92mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
93 struct mlx5_flow_spec *spec,
94 struct mlx5_esw_flow_attr *attr)
95{
96 struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
97 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
98 bool split = !!(attr->split_count);
99 struct mlx5_flow_handle *rule;
100 struct mlx5_flow_table *fdb;
101 int j, i = 0;
102 void *misc;
103
104 if (esw->mode != SRIOV_OFFLOADS)
105 return ERR_PTR(-EOPNOTSUPP);
106
107 flow_act.action = attr->action;
108 /* if per flow vlan pop/push is emulated, don't set that into the firmware */
109 if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
110 flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
111 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
112 else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
113 flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
114 flow_act.vlan[0].vid = attr->vlan_vid[0];
115 flow_act.vlan[0].prio = attr->vlan_prio[0];
116 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
117 flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
118 flow_act.vlan[1].vid = attr->vlan_vid[1];
119 flow_act.vlan[1].prio = attr->vlan_prio[1];
120 }
121 }
122
123 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
124 if (attr->dest_chain) {
125 struct mlx5_flow_table *ft;
126
127 ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
128 if (IS_ERR(ft)) {
129 rule = ERR_CAST(ft);
130 goto err_create_goto_table;
131 }
132
133 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
134 dest[i].ft = ft;
135 i++;
136 } else {
137 for (j = attr->split_count; j < attr->out_count; j++) {
138 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
139 dest[i].vport.num = attr->dests[j].rep->vport;
140 dest[i].vport.vhca_id =
141 MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
142 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
143 dest[i].vport.flags |=
144 MLX5_FLOW_DEST_VPORT_VHCA_ID;
145 if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
146 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
147 flow_act.reformat_id = attr->dests[j].encap_id;
148 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
149 dest[i].vport.reformat_id =
150 attr->dests[j].encap_id;
151 }
152 i++;
153 }
154 }
155 }
156 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
157 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
158 dest[i].counter_id = mlx5_fc_id(attr->counter);
159 i++;
160 }
161
162 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
163 MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
164
165 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
166 MLX5_SET(fte_match_set_misc, misc,
167 source_eswitch_owner_vhca_id,
168 MLX5_CAP_GEN(attr->in_mdev, vhca_id));
169
170 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
171 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
172 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
173 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
174 source_eswitch_owner_vhca_id);
175
176 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
177 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
178 if (attr->tunnel_match_level != MLX5_MATCH_NONE)
179 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
180 if (attr->match_level != MLX5_MATCH_NONE)
181 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
182 } else if (attr->match_level != MLX5_MATCH_NONE) {
183 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
184 }
185
186 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
187 flow_act.modify_id = attr->mod_hdr_id;
188
189 fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
190 if (IS_ERR(fdb)) {
191 rule = ERR_CAST(fdb);
192 goto err_esw_get;
193 }
194
195 rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
196 if (IS_ERR(rule))
197 goto err_add_rule;
198 else
199 esw->offloads.num_flows++;
200
201 return rule;
202
203err_add_rule:
204 esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
205err_esw_get:
206 if (attr->dest_chain)
207 esw_put_prio_table(esw, attr->dest_chain, 1, 0);
208err_create_goto_table:
209 return rule;
210}
211
212struct mlx5_flow_handle *
213mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
214 struct mlx5_flow_spec *spec,
215 struct mlx5_esw_flow_attr *attr)
216{
217 struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
218 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
219 struct mlx5_flow_table *fast_fdb;
220 struct mlx5_flow_table *fwd_fdb;
221 struct mlx5_flow_handle *rule;
222 void *misc;
223 int i;
224
225 fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
226 if (IS_ERR(fast_fdb)) {
227 rule = ERR_CAST(fast_fdb);
228 goto err_get_fast;
229 }
230
231 fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
232 if (IS_ERR(fwd_fdb)) {
233 rule = ERR_CAST(fwd_fdb);
234 goto err_get_fwd;
235 }
236
237 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
238 for (i = 0; i < attr->split_count; i++) {
239 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
240 dest[i].vport.num = attr->dests[i].rep->vport;
241 dest[i].vport.vhca_id =
242 MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
243 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
244 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
245 if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
246 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
247 dest[i].vport.reformat_id = attr->dests[i].encap_id;
248 }
249 }
250 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
251 dest[i].ft = fwd_fdb,
252 i++;
253
254 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
255 MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
256
257 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
258 MLX5_SET(fte_match_set_misc, misc,
259 source_eswitch_owner_vhca_id,
260 MLX5_CAP_GEN(attr->in_mdev, vhca_id));
261
262 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
263 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
264 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
265 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
266 source_eswitch_owner_vhca_id);
267
268 if (attr->match_level == MLX5_MATCH_NONE)
269 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
270 else
271 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
272 MLX5_MATCH_MISC_PARAMETERS;
273
274 rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
275
276 if (IS_ERR(rule))
277 goto add_err;
278
279 esw->offloads.num_flows++;
280
281 return rule;
282add_err:
283 esw_put_prio_table(esw, attr->chain, attr->prio, 1);
284err_get_fwd:
285 esw_put_prio_table(esw, attr->chain, attr->prio, 0);
286err_get_fast:
287 return rule;
288}
289
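/* Common teardown for offloaded and forward rules: delete the rule and drop
 * the references taken on the chain/prio tables when the rule was added.
 */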
290static void
291__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
292 struct mlx5_flow_handle *rule,
293 struct mlx5_esw_flow_attr *attr,
294 bool fwd_rule)
295{
296 bool split = (attr->split_count > 0);
297
298 mlx5_del_flow_rules(rule);
299 esw->offloads.num_flows--;
300
301 if (fwd_rule) {
302 esw_put_prio_table(esw, attr->chain, attr->prio, 1);
303 esw_put_prio_table(esw, attr->chain, attr->prio, 0);
304 } else {
305 esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
306 if (attr->dest_chain)
307 esw_put_prio_table(esw, attr->dest_chain, 1, 0);
308 }
309}
310
311void
312mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
313 struct mlx5_flow_handle *rule,
314 struct mlx5_esw_flow_attr *attr)
315{
316 __mlx5_eswitch_del_rule(esw, rule, attr, false);
317}
318
319void
320mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
321 struct mlx5_flow_handle *rule,
322 struct mlx5_esw_flow_attr *attr)
323{
324 __mlx5_eswitch_del_rule(esw, rule, attr, true);
325}
326
327static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
328{
329 struct mlx5_eswitch_rep *rep;
330 int vf_vport, err = 0;
331
332 esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
333 for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
334 rep = &esw->offloads.vport_reps[vf_vport];
335 if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
336 continue;
337
338 err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
339 if (err)
340 goto out;
341 }
342
343out:
344 return err;
345}
346
347static struct mlx5_eswitch_rep *
348esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
349{
350 struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
351
352 in_rep = attr->in_rep;
353 out_rep = attr->dests[0].rep;
354
355 if (push)
356 vport = in_rep;
357 else if (pop)
358 vport = out_rep;
359 else
360 vport = in_rep;
361
362 return vport;
363}
364
365static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
366 bool push, bool pop, bool fwd)
367{
368 struct mlx5_eswitch_rep *in_rep, *out_rep;
369
370 if ((push || pop) && !fwd)
371 goto out_notsupp;
372
373 in_rep = attr->in_rep;
374 out_rep = attr->dests[0].rep;
375
376 if (push && in_rep->vport == MLX5_VPORT_UPLINK)
377 goto out_notsupp;
378
379 if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
380 goto out_notsupp;
381
382 /* vport has vlan push configured, can't offload VF --> wire rules w.o it */
383 if (!push && !pop && fwd)
384 if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
385 goto out_notsupp;
386
387 /* protects against (1) setting rules with different vlans to push and
388 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
389 */
390 if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
391 goto out_notsupp;
392
393 return 0;
394
395out_notsupp:
396 return -EOPNOTSUPP;
397}
398
399int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
400 struct mlx5_esw_flow_attr *attr)
401{
402 struct offloads_fdb *offloads = &esw->fdb_table.offloads;
403 struct mlx5_eswitch_rep *vport = NULL;
404 bool push, pop, fwd;
405 int err = 0;
406
407 /* nop if we're on the vlan push/pop non emulation mode */
408 if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
409 return 0;
410
411 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
412 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
413 fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
414 !attr->dest_chain);
415
416 err = esw_add_vlan_action_check(attr, push, pop, fwd);
417 if (err)
418 return err;
419
420 attr->vlan_handled = false;
421
422 vport = esw_vlan_action_get_vport(attr, push, pop);
423
424 if (!push && !pop && fwd) {
425 /* tracks VF --> wire rules without vlan push action */
426 if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
427 vport->vlan_refcount++;
428 attr->vlan_handled = true;
429 }
430
431 return 0;
432 }
433
434 if (!push && !pop)
435 return 0;
436
437 if (!(offloads->vlan_push_pop_refcount)) {
438 /* it's the 1st vlan rule, apply global vlan pop policy */
439 err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
440 if (err)
441 goto out;
442 }
443 offloads->vlan_push_pop_refcount++;
444
445 if (push) {
446 if (vport->vlan_refcount)
447 goto skip_set_push;
448
449 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
450 SET_VLAN_INSERT | SET_VLAN_STRIP);
451 if (err)
452 goto out;
453 vport->vlan = attr->vlan_vid[0];
454skip_set_push:
455 vport->vlan_refcount++;
456 }
457out:
458 if (!err)
459 attr->vlan_handled = true;
460 return err;
461}
462
463int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
464 struct mlx5_esw_flow_attr *attr)
465{
466 struct offloads_fdb *offloads = &esw->fdb_table.offloads;
467 struct mlx5_eswitch_rep *vport = NULL;
468 bool push, pop, fwd;
469 int err = 0;
470
471 /* nop if we're on the vlan push/pop non emulation mode */
472 if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
473 return 0;
474
475 if (!attr->vlan_handled)
476 return 0;
477
478 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
479 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
480 fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
481
482 vport = esw_vlan_action_get_vport(attr, push, pop);
483
484 if (!push && !pop && fwd) {
485 /* tracks VF --> wire rules without vlan push action */
486 if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
487 vport->vlan_refcount--;
488
489 return 0;
490 }
491
492 if (push) {
493 vport->vlan_refcount--;
494 if (vport->vlan_refcount)
495 goto skip_unset_push;
496
497 vport->vlan = 0;
498 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
499 0, 0, SET_VLAN_STRIP);
500 if (err)
501 goto out;
502 }
503
504skip_unset_push:
505 offloads->vlan_push_pop_refcount--;
506 if (offloads->vlan_push_pop_refcount)
507 return 0;
508
509 /* no more vlan rules, stop global vlan pop policy */
510 err = esw_set_global_vlan_pop(esw, 0);
511
512out:
513 return err;
514}
515
516struct mlx5_flow_handle *
517mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
518{
519 struct mlx5_flow_act flow_act = {0};
520 struct mlx5_flow_destination dest = {};
521 struct mlx5_flow_handle *flow_rule;
522 struct mlx5_flow_spec *spec;
523 void *misc;
524
525 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
526 if (!spec) {
527 flow_rule = ERR_PTR(-ENOMEM);
528 goto out;
529 }
530
531 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
532 MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
533 /* source vport is the esw manager */
534 MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);
535
536 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
537 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
538 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
539
540 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
541 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
542 dest.vport.num = vport;
543 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
544
545 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
546 &flow_act, &dest, 1);
547 if (IS_ERR(flow_rule))
548 esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
549out:
550 kvfree(spec);
551 return flow_rule;
552}
553EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
554
555void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
556{
557 mlx5_del_flow_rules(rule);
558}
559
560static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
561 struct mlx5_flow_spec *spec,
562 struct mlx5_flow_destination *dest)
563{
564 void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
565 misc_parameters);
566
567 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
568 MLX5_CAP_GEN(peer_dev, vhca_id));
569
570 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
571
572 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
573 misc_parameters);
574 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
575 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
576 source_eswitch_owner_vhca_id);
577
578 dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
579 dest->vport.num = peer_dev->priv.eswitch->manager_vport;
580 dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
581 dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
582}
583
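/* Install one miss rule per vport of the peer device, forwarding traffic
 * sourced from that vport to the peer eswitch manager vport. Used when two
 * eswitches are paired (merged eswitch).
 */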
584static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
585 struct mlx5_core_dev *peer_dev)
586{
587 struct mlx5_flow_destination dest = {};
588 struct mlx5_flow_act flow_act = {0};
589 struct mlx5_flow_handle **flows;
590 struct mlx5_flow_handle *flow;
591 struct mlx5_flow_spec *spec;
592 /* total vports is the same for both e-switches */
593 int nvports = esw->total_vports;
594 void *misc;
595 int err, i;
596
597 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
598 if (!spec)
599 return -ENOMEM;
600
601 peer_miss_rules_setup(peer_dev, spec, &dest);
602
603 flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
604 if (!flows) {
605 err = -ENOMEM;
606 goto alloc_flows_err;
607 }
608
609 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
610 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
611 misc_parameters);
612
613 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
614 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_PF);
615 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
616 spec, &flow_act, &dest, 1);
617 if (IS_ERR(flow)) {
618 err = PTR_ERR(flow);
619 goto add_pf_flow_err;
620 }
621 flows[MLX5_VPORT_PF] = flow;
622 }
623
624 if (mlx5_ecpf_vport_exists(esw->dev)) {
625 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
626 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
627 spec, &flow_act, &dest, 1);
628 if (IS_ERR(flow)) {
629 err = PTR_ERR(flow);
630 goto add_ecpf_flow_err;
631 }
632 flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
633 }
634
635 mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
636 MLX5_SET(fte_match_set_misc, misc, source_port, i);
637 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
638 spec, &flow_act, &dest, 1);
639 if (IS_ERR(flow)) {
640 err = PTR_ERR(flow);
641 goto add_vf_flow_err;
642 }
643 flows[i] = flow;
644 }
645
646 esw->fdb_table.offloads.peer_miss_rules = flows;
647
648 kvfree(spec);
649 return 0;
650
651add_vf_flow_err:
652 nvports = --i;
653 mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
654 mlx5_del_flow_rules(flows[i]);
655
656 if (mlx5_ecpf_vport_exists(esw->dev))
657 mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
658add_ecpf_flow_err:
659 if (mlx5_core_is_ecpf_esw_manager(esw->dev))
660 mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
661add_pf_flow_err:
662 esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
663 kvfree(flows);
664alloc_flows_err:
665 kvfree(spec);
666 return err;
667}
668
669static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
670{
671 struct mlx5_flow_handle **flows;
672 int i;
673
674 flows = esw->fdb_table.offloads.peer_miss_rules;
675
676 mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
677 mlx5_core_max_vfs(esw->dev))
678 mlx5_del_flow_rules(flows[i]);
679
680 if (mlx5_ecpf_vport_exists(esw->dev))
681 mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
682
683 if (mlx5_core_is_ecpf_esw_manager(esw->dev))
684 mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
685
686 kvfree(flows);
687}
688
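/* Install the two match-all miss flows (unicast and multicast destination
 * MAC) that forward unmatched FDB traffic to the eswitch manager vport.
 */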
689static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
690{
691 struct mlx5_flow_act flow_act = {0};
692 struct mlx5_flow_destination dest = {};
693 struct mlx5_flow_handle *flow_rule = NULL;
694 struct mlx5_flow_spec *spec;
695 void *headers_c;
696 void *headers_v;
697 int err = 0;
698 u8 *dmac_c;
699 u8 *dmac_v;
700
701 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
702 if (!spec) {
703 err = -ENOMEM;
704 goto out;
705 }
706
707 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
708 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
709 outer_headers);
710 dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
711 outer_headers.dmac_47_16);
712 dmac_c[0] = 0x01;
713
714 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
715 dest.vport.num = esw->manager_vport;
716 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
717
718 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
719 &flow_act, &dest, 1);
720 if (IS_ERR(flow_rule)) {
721 err = PTR_ERR(flow_rule);
722 esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
723 goto out;
724 }
725
726 esw->fdb_table.offloads.miss_rule_uni = flow_rule;
727
728 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
729 outer_headers);
730 dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
731 outer_headers.dmac_47_16);
732 dmac_v[0] = 0x01;
733 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
734 &flow_act, &dest, 1);
735 if (IS_ERR(flow_rule)) {
736 err = PTR_ERR(flow_rule);
737 esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
738 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
739 goto out;
740 }
741
742 esw->fdb_table.offloads.miss_rule_multi = flow_rule;
743
744out:
745 kvfree(spec);
746 return err;
747}
748
749#define ESW_OFFLOADS_NUM_GROUPS 4
750
751/* Firmware currently has 4 pools of 4 sizes that it supports (ESW_POOLS),
752 * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
753 * for each flow table pool. We can allocate up to 16M of each pool,
754 * and we keep track of how much we used via put/get_sz_to_pool.
755 * Firmware doesn't report any of this for now.
756 * ESW_POOL is expected to be sorted from large to small
757 */
758#define ESW_SIZE (16 * 1024 * 1024)
759const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
760 64 * 1024, 4 * 1024 };
761
762static int
763get_sz_from_pool(struct mlx5_eswitch *esw)
764{
765 int sz = 0, i;
766
767 for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
768 if (esw->fdb_table.offloads.fdb_left[i]) {
769 --esw->fdb_table.offloads.fdb_left[i];
770 sz = ESW_POOLS[i];
771 break;
772 }
773 }
774
775 return sz;
776}
777
778static void
779put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
780{
781 int i;
782
783 for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
784 if (sz >= ESW_POOLS[i]) {
785 ++esw->fdb_table.offloads.fdb_left[i];
786 break;
787 }
788 }
789}
790
791static struct mlx5_flow_table *
792create_next_size_table(struct mlx5_eswitch *esw,
793 struct mlx5_flow_namespace *ns,
794 u16 table_prio,
795 int level,
796 u32 flags)
797{
798 struct mlx5_flow_table *fdb;
799 int sz;
800
801 sz = get_sz_from_pool(esw);
802 if (!sz)
803 return ERR_PTR(-ENOSPC);
804
805 fdb = mlx5_create_auto_grouped_flow_table(ns,
806 table_prio,
807 sz,
808 ESW_OFFLOADS_NUM_GROUPS,
809 level,
810 flags);
811 if (IS_ERR(fdb)) {
812 esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
813 (int)PTR_ERR(fdb), table_prio, level, sz);
814 put_sz_to_pool(esw, sz);
815 }
816
817 return fdb;
818}
819
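/* Return the flow table for (chain, prio, level), creating it and any lower
 * levels on demand and taking a reference on each; released with
 * esw_put_prio_table().
 */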
820static struct mlx5_flow_table *
821esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
822{
823 struct mlx5_core_dev *dev = esw->dev;
824 struct mlx5_flow_table *fdb = NULL;
825 struct mlx5_flow_namespace *ns;
826 int table_prio, l = 0;
827 u32 flags = 0;
828
829 if (chain == FDB_SLOW_PATH_CHAIN)
830 return esw->fdb_table.offloads.slow_fdb;
831
832 mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
833
834 fdb = fdb_prio_table(esw, chain, prio, level).fdb;
835 if (fdb) {
836 /* take ref on earlier levels as well */
837 while (level >= 0)
838 fdb_prio_table(esw, chain, prio, level--).num_rules++;
839 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
840 return fdb;
841 }
842
843 ns = mlx5_get_fdb_sub_ns(dev, chain);
844 if (!ns) {
845 esw_warn(dev, "Failed to get FDB sub namespace\n");
846 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
847 return ERR_PTR(-EOPNOTSUPP);
848 }
849
850 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
851 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
852 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
853
854 table_prio = (chain * FDB_MAX_PRIO) + prio - 1;
855
856 /* create earlier levels for correct fs_core lookup when
857 * connecting tables
858 */
859 for (l = 0; l <= level; l++) {
860 if (fdb_prio_table(esw, chain, prio, l).fdb) {
861 fdb_prio_table(esw, chain, prio, l).num_rules++;
862 continue;
863 }
864
865 fdb = create_next_size_table(esw, ns, table_prio, l, flags);
866 if (IS_ERR(fdb)) {
867 l--;
868 goto err_create_fdb;
869 }
870
871 fdb_prio_table(esw, chain, prio, l).fdb = fdb;
872 fdb_prio_table(esw, chain, prio, l).num_rules = 1;
873 }
874
875 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
876 return fdb;
877
878err_create_fdb:
879 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
880 if (l >= 0)
881 esw_put_prio_table(esw, chain, prio, l);
882
883 return fdb;
884}
885
886static void
887esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
888{
889 int l;
890
891 if (chain == FDB_SLOW_PATH_CHAIN)
892 return;
893
894 mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
895
896 for (l = level; l >= 0; l--) {
897 if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
898 continue;
899
900 put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
901 mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
902 fdb_prio_table(esw, chain, prio, l).fdb = NULL;
903 }
904
905 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
906}
907
908static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
909{
910 /* If lazy creation isn't supported, deref the fast path tables */
911 if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
912 esw_put_prio_table(esw, 0, 1, 1);
913 esw_put_prio_table(esw, 0, 1, 0);
914 }
915}
916
917#define MAX_PF_SQ 256
918#define MAX_SQ_NVPORTS 32
919
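/* Create the slow path FDB table and its flow groups: send-to-vport,
 * peer eswitch miss and the unicast/multicast miss group, then install the
 * miss rules.
 */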
920static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
921{
922 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
923 struct mlx5_flow_table_attr ft_attr = {};
924 struct mlx5_core_dev *dev = esw->dev;
925 u32 *flow_group_in, max_flow_counter;
926 struct mlx5_flow_namespace *root_ns;
927 struct mlx5_flow_table *fdb = NULL;
928 int table_size, ix, err = 0, i;
929 struct mlx5_flow_group *g;
930 u32 flags = 0, fdb_max;
931 void *match_criteria;
932 u8 *dmac;
933
934 esw_debug(esw->dev, "Create offloads FDB Tables\n");
935 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
936 if (!flow_group_in)
937 return -ENOMEM;
938
939 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
940 if (!root_ns) {
941 esw_warn(dev, "Failed to get FDB flow namespace\n");
942 err = -EOPNOTSUPP;
943 goto ns_err;
944 }
945
946 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
947 MLX5_CAP_GEN(dev, max_flow_counter_15_0);
948 fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
949
950 esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
951 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
952 max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
953 fdb_max);
954
955 for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
956 esw->fdb_table.offloads.fdb_left[i] =
957 ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
958
959 table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
960 MLX5_ESW_MISS_FLOWS + esw->total_vports;
961
962 /* create the slow path fdb with encap set, so further table instances
963 * can be created at run time while VFs are probed if the FW allows that.
964 */
965 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
966 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
967 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
968
969 ft_attr.flags = flags;
970 ft_attr.max_fte = table_size;
971 ft_attr.prio = FDB_SLOW_PATH;
972
973 fdb = mlx5_create_flow_table(root_ns, &ft_attr);
974 if (IS_ERR(fdb)) {
975 err = PTR_ERR(fdb);
976 esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
977 goto slow_fdb_err;
978 }
979 esw->fdb_table.offloads.slow_fdb = fdb;
980
981 /* If lazy creation isn't supported, open the fast path tables now */
982 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
983 esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
984 esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
985 esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
986 esw_get_prio_table(esw, 0, 1, 0);
987 esw_get_prio_table(esw, 0, 1, 1);
988 } else {
989 esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
990 esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
991 }
992
993 /* create send-to-vport group */
994 memset(flow_group_in, 0, inlen);
995 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
996 MLX5_MATCH_MISC_PARAMETERS);
997
998 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
999
1000 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
1001 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
1002
1003 ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
1004 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1005 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
1006
1007 g = mlx5_create_flow_group(fdb, flow_group_in);
1008 if (IS_ERR(g)) {
1009 err = PTR_ERR(g);
1010 esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
1011 goto send_vport_err;
1012 }
1013 esw->fdb_table.offloads.send_to_vport_grp = g;
1014
1015 /* create peer esw miss group */
1016 memset(flow_group_in, 0, inlen);
1017 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1018 MLX5_MATCH_MISC_PARAMETERS);
1019
1020 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1021 match_criteria);
1022
1023 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1024 misc_parameters.source_port);
1025 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1026 misc_parameters.source_eswitch_owner_vhca_id);
1027
1028 MLX5_SET(create_flow_group_in, flow_group_in,
1029 source_eswitch_owner_vhca_id_valid, 1);
1030 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1031 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1032 ix + esw->total_vports - 1);
1033 ix += esw->total_vports;
1034
1035 g = mlx5_create_flow_group(fdb, flow_group_in);
1036 if (IS_ERR(g)) {
1037 err = PTR_ERR(g);
1038 esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
1039 goto peer_miss_err;
1040 }
1041 esw->fdb_table.offloads.peer_miss_grp = g;
1042
1043 /* create miss group */
1044 memset(flow_group_in, 0, inlen);
1045 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1046 MLX5_MATCH_OUTER_HEADERS);
1047 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1048 match_criteria);
1049 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
1050 outer_headers.dmac_47_16);
1051 dmac[0] = 0x01;
1052
1053 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1054 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1055 ix + MLX5_ESW_MISS_FLOWS);
1056
1057 g = mlx5_create_flow_group(fdb, flow_group_in);
1058 if (IS_ERR(g)) {
1059 err = PTR_ERR(g);
1060 esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
1061 goto miss_err;
1062 }
1063 esw->fdb_table.offloads.miss_grp = g;
1064
1065 err = esw_add_fdb_miss_rule(esw);
1066 if (err)
1067 goto miss_rule_err;
1068
1069 esw->nvports = nvports;
1070 kvfree(flow_group_in);
1071 return 0;
1072
1073miss_rule_err:
1074 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1075miss_err:
1076 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1077peer_miss_err:
1078 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1079send_vport_err:
1080 esw_destroy_offloads_fast_fdb_tables(esw);
1081 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
1082slow_fdb_err:
1083ns_err:
1084 kvfree(flow_group_in);
1085 return err;
1086}
1087
1088static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
1089{
1090 if (!esw->fdb_table.offloads.slow_fdb)
1091 return;
1092
1093 esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
1094 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
1095 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
1096 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1097 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1098 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1099
1100 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
1101 esw_destroy_offloads_fast_fdb_tables(esw);
1102}
1103
1104static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
1105{
1106 struct mlx5_flow_table_attr ft_attr = {};
1107 struct mlx5_core_dev *dev = esw->dev;
1108 struct mlx5_flow_table *ft_offloads;
1109 struct mlx5_flow_namespace *ns;
1110 int err = 0;
1111
1112 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
1113 if (!ns) {
1114 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
1115 return -EOPNOTSUPP;
1116 }
1117
1118 ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
1119
1120 ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
1121 if (IS_ERR(ft_offloads)) {
1122 err = PTR_ERR(ft_offloads);
1123 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
1124 return err;
1125 }
1126
1127 esw->offloads.ft_offloads = ft_offloads;
1128 return 0;
1129}
1130
1131static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
1132{
1133 struct mlx5_esw_offload *offloads = &esw->offloads;
1134
1135 mlx5_destroy_flow_table(offloads->ft_offloads);
1136}
1137
1138static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
1139{
1140 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1141 struct mlx5_flow_group *g;
1142 u32 *flow_group_in;
1143 void *match_criteria, *misc;
1144 int err = 0;
1145
1146 nvports = nvports + MLX5_ESW_MISS_FLOWS;
1147 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1148 if (!flow_group_in)
1149 return -ENOMEM;
1150
1151 /* create vport rx group */
1152 memset(flow_group_in, 0, inlen);
1153 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1154 MLX5_MATCH_MISC_PARAMETERS);
1155
1156 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1157 misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
1158 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
1159
1160 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1161 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
1162
1163 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
1164
1165 if (IS_ERR(g)) {
1166 err = PTR_ERR(g);
1167 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
1168 goto out;
1169 }
1170
1171 esw->offloads.vport_rx_group = g;
1172out:
1173 kvfree(flow_group_in);
1174 return err;
1175}
1176
1177static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
1178{
1179 mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
1180}
1181
1182struct mlx5_flow_handle *
1183mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
1184 struct mlx5_flow_destination *dest)
1185{
1186 struct mlx5_flow_act flow_act = {0};
1187 struct mlx5_flow_handle *flow_rule;
1188 struct mlx5_flow_spec *spec;
1189 void *misc;
1190
1191 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1192 if (!spec) {
1193 flow_rule = ERR_PTR(-ENOMEM);
1194 goto out;
1195 }
1196
1197 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
1198 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
1199
1200 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
1201 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
1202
1203 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
1204
1205 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1206 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
1207 &flow_act, dest, 1);
1208 if (IS_ERR(flow_rule)) {
1209 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
1210 goto out;
1211 }
1212
1213out:
1214 kvfree(spec);
1215 return flow_rule;
1216}
1217
1218static int esw_offloads_start(struct mlx5_eswitch *esw,
1219 struct netlink_ext_ack *extack)
1220{
1221 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
1222
1223 if (esw->mode != SRIOV_LEGACY &&
1224 !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1225 NL_SET_ERR_MSG_MOD(extack,
1226 "Can't set offloads mode, SRIOV legacy not enabled");
1227 return -EINVAL;
1228 }
1229
1230 mlx5_eswitch_disable_sriov(esw);
1231 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
1232 if (err) {
1233 NL_SET_ERR_MSG_MOD(extack,
1234 "Failed setting eswitch to offloads");
1235 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
1236 if (err1) {
1237 NL_SET_ERR_MSG_MOD(extack,
1238 "Failed setting eswitch back to legacy");
1239 }
1240 }
1241 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
1242 if (mlx5_eswitch_inline_mode_get(esw,
1243 num_vfs,
1244 &esw->offloads.inline_mode)) {
1245 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
1246 NL_SET_ERR_MSG_MOD(extack,
1247 "Inline mode is different between vports");
1248 }
1249 }
1250 return err;
1251}
1252
1253void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
1254{
1255 kfree(esw->offloads.vport_reps);
1256}
1257
1258int esw_offloads_init_reps(struct mlx5_eswitch *esw)
1259{
1260 int total_vports = MLX5_TOTAL_VPORTS(esw->dev);
1261 struct mlx5_core_dev *dev = esw->dev;
1262 struct mlx5_eswitch_rep *rep;
1263 u8 hw_id[ETH_ALEN], rep_type;
1264 int vport;
1265
1266 esw->offloads.vport_reps = kcalloc(total_vports,
1267 sizeof(struct mlx5_eswitch_rep),
1268 GFP_KERNEL);
1269 if (!esw->offloads.vport_reps)
1270 return -ENOMEM;
1271
1272 mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
1273
1274 mlx5_esw_for_all_reps(esw, vport, rep) {
1275 rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport);
1276 ether_addr_copy(rep->hw_id, hw_id);
1277
1278 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
1279 atomic_set(&rep->rep_data[rep_type].state,
1280 REP_UNREGISTERED);
1281 }
1282
1283 return 0;
1284}
1285
1286static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
1287 struct mlx5_eswitch_rep *rep, u8 rep_type)
1288{
1289 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
1290 REP_LOADED, REP_REGISTERED) == REP_LOADED)
1291 esw->offloads.rep_ops[rep_type]->unload(rep);
1292}
1293
1294static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
1295{
1296 struct mlx5_eswitch_rep *rep;
1297
1298 if (mlx5_ecpf_vport_exists(esw->dev)) {
1299 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
1300 __esw_offloads_unload_rep(esw, rep, rep_type);
1301 }
1302
1303 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1304 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
1305 __esw_offloads_unload_rep(esw, rep, rep_type);
1306 }
1307
1308 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
1309 __esw_offloads_unload_rep(esw, rep, rep_type);
1310}
1311
1312static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
1313 u8 rep_type)
1314{
1315 struct mlx5_eswitch_rep *rep;
1316 int i;
1317
1318 mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
1319 __esw_offloads_unload_rep(esw, rep, rep_type);
1320}
1321
1322static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
1323{
1324 u8 rep_type = NUM_REP_TYPES;
1325
1326 while (rep_type-- > 0)
1327 __unload_reps_vf_vport(esw, nvports, rep_type);
1328}
1329
1330static void __unload_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
1331 u8 rep_type)
1332{
1333 __unload_reps_vf_vport(esw, nvports, rep_type);
1334
1335 /* Special vports must be the last to unload. */
1336 __unload_reps_special_vport(esw, rep_type);
1337}
1338
1339static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw, int nvports)
1340{
1341 u8 rep_type = NUM_REP_TYPES;
1342
1343 while (rep_type-- > 0)
1344 __unload_reps_all_vport(esw, nvports, rep_type);
1345}
1346
1347static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
1348 struct mlx5_eswitch_rep *rep, u8 rep_type)
1349{
1350 int err = 0;
1351
1352 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
1353 REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
1354 err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
1355 if (err)
1356 atomic_set(&rep->rep_data[rep_type].state,
1357 REP_REGISTERED);
1358 }
1359
1360 return err;
1361}
1362
1363static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
1364{
1365 struct mlx5_eswitch_rep *rep;
1366 int err;
1367
1368 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
1369 err = __esw_offloads_load_rep(esw, rep, rep_type);
1370 if (err)
1371 return err;
1372
1373 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1374 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
1375 err = __esw_offloads_load_rep(esw, rep, rep_type);
1376 if (err)
1377 goto err_pf;
1378 }
1379
1380 if (mlx5_ecpf_vport_exists(esw->dev)) {
1381 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
1382 err = __esw_offloads_load_rep(esw, rep, rep_type);
1383 if (err)
1384 goto err_ecpf;
1385 }
1386
1387 return 0;
1388
1389err_ecpf:
1390 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1391 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
1392 __esw_offloads_unload_rep(esw, rep, rep_type);
1393 }
1394
1395err_pf:
1396 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
1397 __esw_offloads_unload_rep(esw, rep, rep_type);
1398 return err;
1399}
1400
1401static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
1402 u8 rep_type)
1403{
1404 struct mlx5_eswitch_rep *rep;
1405 int err, i;
1406
1407 mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
1408 err = __esw_offloads_load_rep(esw, rep, rep_type);
1409 if (err)
1410 goto err_vf;
1411 }
1412
1413 return 0;
1414
1415err_vf:
1416 __unload_reps_vf_vport(esw, --i, rep_type);
1417 return err;
1418}
1419
1420static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
1421{
1422 u8 rep_type = 0;
1423 int err;
1424
1425 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
1426 err = __load_reps_vf_vport(esw, nvports, rep_type);
1427 if (err)
1428 goto err_reps;
1429 }
1430
1431 return err;
1432
1433err_reps:
1434 while (rep_type-- > 0)
1435 __unload_reps_vf_vport(esw, nvports, rep_type);
1436 return err;
1437}
1438
1439static int __load_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
1440 u8 rep_type)
1441{
1442 int err;
1443
1444 /* Special vports must be loaded first. */
1445 err = __load_reps_special_vport(esw, rep_type);
1446 if (err)
1447 return err;
1448
1449 err = __load_reps_vf_vport(esw, nvports, rep_type);
1450 if (err)
1451 goto err_vfs;
1452
1453 return 0;
1454
1455err_vfs:
1456 __unload_reps_special_vport(esw, rep_type);
1457 return err;
1458}
1459
1460static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw, int nvports)
1461{
1462 u8 rep_type = 0;
1463 int err;
1464
1465 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
1466 err = __load_reps_all_vport(esw, nvports, rep_type);
1467 if (err)
1468 goto err_reps;
1469 }
1470
1471 return err;
1472
1473err_reps:
1474 while (rep_type-- > 0)
1475 __unload_reps_all_vport(esw, nvports, rep_type);
1476 return err;
1477}
1478
1479#define ESW_OFFLOADS_DEVCOM_PAIR (0)
1480#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
1481
1482static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
1483 struct mlx5_eswitch *peer_esw)
1484{
1485 int err;
1486
1487 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
1488 if (err)
1489 return err;
1490
1491 return 0;
1492}
1493
1494static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
1495{
1496 mlx5e_tc_clean_fdb_peer_flows(esw);
1497 esw_del_fdb_peer_miss_rules(esw);
1498}
1499
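/* devcom callback: on PAIR install the peer miss rules in both directions
 * and mark the devices as paired; on UNPAIR remove them.
 */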
1500static int mlx5_esw_offloads_devcom_event(int event,
1501 void *my_data,
1502 void *event_data)
1503{
1504 struct mlx5_eswitch *esw = my_data;
1505 struct mlx5_eswitch *peer_esw = event_data;
1506 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1507 int err;
1508
1509 switch (event) {
1510 case ESW_OFFLOADS_DEVCOM_PAIR:
1511 err = mlx5_esw_offloads_pair(esw, peer_esw);
1512 if (err)
1513 goto err_out;
1514
1515 err = mlx5_esw_offloads_pair(peer_esw, esw);
1516 if (err)
1517 goto err_pair;
1518
1519 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
1520 break;
1521
1522 case ESW_OFFLOADS_DEVCOM_UNPAIR:
1523 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
1524 break;
1525
1526 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
1527 mlx5_esw_offloads_unpair(peer_esw);
1528 mlx5_esw_offloads_unpair(esw);
1529 break;
1530 }
1531
1532 return 0;
1533
1534err_pair:
1535 mlx5_esw_offloads_unpair(esw);
1536
1537err_out:
1538 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
1539 event, err);
1540 return err;
1541}
1542
1543static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
1544{
1545 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1546
1547 INIT_LIST_HEAD(&esw->offloads.peer_flows);
1548 mutex_init(&esw->offloads.peer_mutex);
1549
1550 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1551 return;
1552
1553 mlx5_devcom_register_component(devcom,
1554 MLX5_DEVCOM_ESW_OFFLOADS,
1555 mlx5_esw_offloads_devcom_event,
1556 esw);
1557
1558 mlx5_devcom_send_event(devcom,
1559 MLX5_DEVCOM_ESW_OFFLOADS,
1560 ESW_OFFLOADS_DEVCOM_PAIR, esw);
1561}
1562
1563static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
1564{
1565 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1566
1567 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1568 return;
1569
1570 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
1571 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
1572
1573 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1574}
1575
1576static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
1577 struct mlx5_vport *vport)
1578{
1579 struct mlx5_core_dev *dev = esw->dev;
1580 struct mlx5_flow_act flow_act = {0};
1581 struct mlx5_flow_spec *spec;
1582 int err = 0;
1583
1584 /* For prio tag mode, there is only 1 FTE:
1585 * 1) Untagged packets - push prio tag VLAN, allow
1586 * Unmatched traffic is allowed by default
1587 */
1588
1589 if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
1590 return -EOPNOTSUPP;
1591
1592 esw_vport_cleanup_ingress_rules(esw, vport);
1593
1594 err = esw_vport_enable_ingress_acl(esw, vport);
1595 if (err) {
1596 mlx5_core_warn(esw->dev,
1597 "failed to enable prio tag ingress acl (%d) on vport[%d]\n",
1598 err, vport->vport);
1599 return err;
1600 }
1601
1602 esw_debug(esw->dev,
1603 "vport[%d] configure ingress rules\n", vport->vport);
1604
1605 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1606 if (!spec) {
1607 err = -ENOMEM;
1608 goto out_no_mem;
1609 }
1610
1611 /* Untagged packets - push prio tag VLAN, allow */
1612 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1613 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
1614 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1615 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1616 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1617 flow_act.vlan[0].ethtype = ETH_P_8021Q;
1618 flow_act.vlan[0].vid = 0;
1619 flow_act.vlan[0].prio = 0;
1620 vport->ingress.allow_rule =
1621 mlx5_add_flow_rules(vport->ingress.acl, spec,
1622 &flow_act, NULL, 0);
1623 if (IS_ERR(vport->ingress.allow_rule)) {
1624 err = PTR_ERR(vport->ingress.allow_rule);
1625 esw_warn(esw->dev,
1626 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
1627 vport->vport, err);
1628 vport->ingress.allow_rule = NULL;
1629 goto out;
1630 }
1631
1632out:
1633 kvfree(spec);
1634out_no_mem:
1635 if (err)
1636 esw_vport_cleanup_ingress_rules(esw, vport);
1637 return err;
1638}
1639
1640static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
1641 struct mlx5_vport *vport)
1642{
1643 struct mlx5_flow_act flow_act = {0};
1644 struct mlx5_flow_spec *spec;
1645 int err = 0;
1646
1647 /* For prio tag mode, there is only 1 FTE:
1648 * 1) prio tag packets - pop the prio tag VLAN, allow
1649 * Unmatched traffic is allowed by default
1650 */
1651
1652 esw_vport_cleanup_egress_rules(esw, vport);
1653
1654 err = esw_vport_enable_egress_acl(esw, vport);
1655 if (err) {
1656 mlx5_core_warn(esw->dev,
1657 "failed to enable egress acl (%d) on vport[%d]\n",
1658 err, vport->vport);
1659 return err;
1660 }
1661
1662 esw_debug(esw->dev,
1663 "vport[%d] configure prio tag egress rules\n", vport->vport);
1664
1665 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1666 if (!spec) {
1667 err = -ENOMEM;
1668 goto out_no_mem;
1669 }
1670
1671 /* prio tag vlan rule - pop it so VF receives untagged packets */
1672 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1673 MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
1674 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
1675 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);
1676
1677 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1678 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1679 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1680 vport->egress.allowed_vlan =
1681 mlx5_add_flow_rules(vport->egress.acl, spec,
1682 &flow_act, NULL, 0);
1683 if (IS_ERR(vport->egress.allowed_vlan)) {
1684 err = PTR_ERR(vport->egress.allowed_vlan);
1685 esw_warn(esw->dev,
1686 "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
1687 vport->vport, err);
1688 vport->egress.allowed_vlan = NULL;
1689 goto out;
1690 }
1691
1692out:
1693 kvfree(spec);
1694out_no_mem:
1695 if (err)
1696 esw_vport_cleanup_egress_rules(esw, vport);
1697 return err;
1698}
1699
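/* Install the prio tag ingress and egress ACLs on every VF vport. On
 * failure, undo whatever was configured on the vports handled so far.
 */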
1700static int esw_prio_tag_acls_config(struct mlx5_eswitch *esw, int nvports)
1701{
786ef904 1702 struct mlx5_vport *vport = NULL;
18486737
EB
1703 int i, j;
1704 int err;
1705
786ef904
PP
1706 mlx5_esw_for_each_vf_vport(esw, i, vport, nvports) {
1707 err = esw_vport_ingress_prio_tag_config(esw, vport);
18486737
EB
1708 if (err)
1709 goto err_ingress;
786ef904 1710 err = esw_vport_egress_prio_tag_config(esw, vport);
18486737
EB
1711 if (err)
1712 goto err_egress;
1713 }
1714
1715 return 0;
1716
1717err_egress:
786ef904 1718 esw_vport_disable_ingress_acl(esw, vport);
18486737 1719err_ingress:
786ef904
PP
1720 mlx5_esw_for_each_vf_vport_reverse(esw, j, vport, i - 1) {
1721 esw_vport_disable_egress_acl(esw, vport);
1722 esw_vport_disable_ingress_acl(esw, vport);
18486737
EB
1723 }
1724
1725 return err;
1726}
1727
1728static void esw_prio_tag_acls_cleanup(struct mlx5_eswitch *esw)
1729{
786ef904 1730 struct mlx5_vport *vport;
18486737
EB
1731 int i;
1732
786ef904
PP
1733 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->nvports) {
1734 esw_vport_disable_egress_acl(esw, vport);
1735 esw_vport_disable_ingress_acl(esw, vport);
18486737
EB
1736 }
1737}
1738
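/* Create the steering objects needed for offloads mode: the prio tag ACLs
 * (only when the device requires prio tags), the offloads FDB tables, the
 * offloads table and the per-vport RX flow group. Objects already created
 * are destroyed on error.
 */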
eca8cc38 1739static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
6ed1803a
MB
1740{
1741 int err;
1742
5c1d260e 1743 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
e52c2802
PB
1744 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
1745
18486737
EB
1746 if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
1747 err = esw_prio_tag_acls_config(esw, nvports);
1748 if (err)
1749 return err;
1750 }
1751
1967ce6e 1752 err = esw_create_offloads_fdb_tables(esw, nvports);
c930a3ad 1753 if (err)
c5447c70 1754 return err;
c930a3ad 1755
cd7e4186 1756 err = esw_create_offloads_table(esw, nvports);
c930a3ad
OG
1757 if (err)
1758 goto create_ft_err;
1759
cd7e4186 1760 err = esw_create_vport_rx_group(esw, nvports);
c930a3ad
OG
1761 if (err)
1762 goto create_fg_err;
1763
1764 return 0;
1765
1766create_fg_err:
1767 esw_destroy_offloads_table(esw);
1768
1769create_ft_err:
1967ce6e 1770 esw_destroy_offloads_fdb_tables(esw);
5bae8c03 1771
c930a3ad
OG
1772 return err;
1773}
1774
eca8cc38
BW
1775static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
1776{
1777 esw_destroy_vport_rx_group(esw);
1778 esw_destroy_offloads_table(esw);
1779 esw_destroy_offloads_fdb_tables(esw);
18486737
EB
1780 if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
1781 esw_prio_tag_acls_cleanup(esw);
eca8cc38
BW
1782}
1783
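/* Work handler for the ESW_FUNCTIONS_CHANGED event: query the current
 * number of VFs and load or unload the VF representors accordingly. The
 * cached esw_funcs.num_vfs is updated only after the change is applied.
 */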
cd56f929 1784static void esw_functions_changed_event_handler(struct work_struct *work)
a3888f33
BW
1785{
1786 struct mlx5_host_work *host_work;
1787 struct mlx5_eswitch *esw;
cd56f929
VP
1788 u16 num_vfs = 0;
1789 int err;
a3888f33
BW
1790
1791 host_work = container_of(work, struct mlx5_host_work, work);
1792 esw = host_work->esw;
1793
cd56f929
VP
1794 err = mlx5_esw_query_functions(esw->dev, &num_vfs);
1795 if (err || num_vfs == esw->esw_funcs.num_vfs)
a3888f33
BW
1796 goto out;
1797
1798 /* Number of VFs can only change from "0 to x" or "x to 0". */
cd56f929
VP
1799 if (esw->esw_funcs.num_vfs > 0) {
1800 esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
a3888f33 1801 } else {
cd56f929 1802 err = esw_offloads_load_vf_reps(esw, num_vfs);
a3888f33
BW
1803
1804 if (err)
1805 goto out;
1806 }
1807
cd56f929 1808 esw->esw_funcs.num_vfs = num_vfs;
a3888f33
BW
1809
1810out:
1811 kfree(host_work);
1812}
1813
cd56f929
VP
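/* Notifier callback for ESW_FUNCTIONS_CHANGED. It may be invoked in atomic
 * context, so it only allocates a work item (GFP_ATOMIC) and defers the
 * representor load/unload to the eswitch workqueue.
 */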
1814static int esw_functions_changed_event(struct notifier_block *nb,
1815 unsigned long type, void *data)
a3888f33 1816{
cd56f929 1817 struct mlx5_esw_functions *esw_funcs;
a3888f33 1818 struct mlx5_host_work *host_work;
a3888f33
BW
1819 struct mlx5_eswitch *esw;
1820
1821 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
1822 if (!host_work)
1823 return NOTIFY_DONE;
1824
cd56f929
VP
1825 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
1826 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
a3888f33
BW
1827
1828 host_work->esw = esw;
1829
cd56f929 1830 INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
a3888f33
BW
1831 queue_work(esw->work_queue, &host_work->work);
1832
1833 return NOTIFY_OK;
1834}
1835
cd56f929
VP
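/* Register the ESW_FUNCTIONS_CHANGED notifier and seed the cached VF count.
 * This is a no-op on devices that are not the eswitch functions handler.
 */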
1836static void esw_functions_changed_event_init(struct mlx5_eswitch *esw,
1837 u16 vf_nvports)
1838{
6706a3b9 1839 if (!mlx5_eswitch_is_funcs_handler(esw->dev))
cd56f929
VP
1840 return;
1841
1842 MLX5_NB_INIT(&esw->esw_funcs.nb, esw_functions_changed_event,
1843 ESW_FUNCTIONS_CHANGED);
1844 mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
1845 esw->esw_funcs.num_vfs = vf_nvports;
1846}
1847
1848static void esw_functions_changed_event_cleanup(struct mlx5_eswitch *esw)
1849{
6706a3b9 1850 if (!mlx5_eswitch_is_funcs_handler(esw->dev))
cd56f929
VP
1851 return;
1852
1853 mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
1854 flush_workqueue(esw->work_queue);
1855}
1856
c9b99abc
BW
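/* Bring up offloads mode: create the steering objects, load all
 * representors, initialize devcom pairing, register the functions-changed
 * notifier and enable RoCE.
 */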
1857int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
1858 int total_nvports)
eca8cc38
BW
1859{
1860 int err;
1861
c9b99abc 1862 err = esw_offloads_steering_init(esw, total_nvports);
eca8cc38
BW
1863 if (err)
1864 return err;
1865
29d9fd7d 1866 err = esw_offloads_load_all_reps(esw, vf_nvports);
eca8cc38
BW
1867 if (err)
1868 goto err_reps;
1869
1870 esw_offloads_devcom_init(esw);
a3888f33 1871
cd56f929 1872 esw_functions_changed_event_init(esw, vf_nvports);
a3888f33 1873
80f09dfc
MG
1874 mlx5_rdma_enable_roce(esw->dev);
1875
eca8cc38
BW
1876 return 0;
1877
1878err_reps:
1879 esw_offloads_steering_cleanup(esw);
1880 return err;
1881}
1882
db7ff19e
EB
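/* Switch the eswitch back to legacy SR-IOV mode. If enabling legacy mode
 * fails, try to restore offloads mode so the eswitch is not left disabled.
 */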
1883static int esw_offloads_stop(struct mlx5_eswitch *esw,
1884 struct netlink_ext_ack *extack)
c930a3ad 1885{
6c419ba8 1886 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
c930a3ad
OG
1887
1888 mlx5_eswitch_disable_sriov(esw);
1889 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
6c419ba8 1890 if (err) {
8c98ee77 1891 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
6c419ba8 1892 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
8c98ee77
EB
1893 if (err1) {
1894 NL_SET_ERR_MSG_MOD(extack,
1895 "Failed setting eswitch back to offloads");
1896 }
6c419ba8 1897 }
c930a3ad
OG
1898
1899 return err;
1900}
1901
c9b99abc 1902void esw_offloads_cleanup(struct mlx5_eswitch *esw)
c930a3ad 1903{
a3888f33
BW
1904 u16 num_vfs;
1905
cd56f929
VP
1906 esw_functions_changed_event_cleanup(esw);
1907
6706a3b9 1908 if (mlx5_eswitch_is_funcs_handler(esw->dev))
cd56f929
VP
1909 num_vfs = esw->esw_funcs.num_vfs;
1910 else
a3888f33 1911 num_vfs = esw->dev->priv.sriov.num_vfs;
c9b99abc 1912
80f09dfc 1913 mlx5_rdma_disable_roce(esw->dev);
ac004b83 1914 esw_offloads_devcom_cleanup(esw);
29d9fd7d 1915 esw_offloads_unload_all_reps(esw, num_vfs);
eca8cc38 1916 esw_offloads_steering_cleanup(esw);
c930a3ad
OG
1917}
1918
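/* Helpers translating between the devlink eswitch mode / inline mode
 * constants and the mlx5 internal ones.
 */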
ef78618b 1919static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
c930a3ad
OG
1920{
1921 switch (mode) {
1922 case DEVLINK_ESWITCH_MODE_LEGACY:
1923 *mlx5_mode = SRIOV_LEGACY;
1924 break;
1925 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
1926 *mlx5_mode = SRIOV_OFFLOADS;
1927 break;
1928 default:
1929 return -EINVAL;
1930 }
1931
1932 return 0;
1933}
1934
ef78618b
OG
1935static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
1936{
1937 switch (mlx5_mode) {
1938 case SRIOV_LEGACY:
1939 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
1940 break;
1941 case SRIOV_OFFLOADS:
1942 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
1943 break;
1944 default:
1945 return -EINVAL;
1946 }
1947
1948 return 0;
1949}
1950
bffaa916
RD
1951static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
1952{
1953 switch (mode) {
1954 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
1955 *mlx5_mode = MLX5_INLINE_MODE_NONE;
1956 break;
1957 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
1958 *mlx5_mode = MLX5_INLINE_MODE_L2;
1959 break;
1960 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
1961 *mlx5_mode = MLX5_INLINE_MODE_IP;
1962 break;
1963 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
1964 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
1965 break;
1966 default:
1967 return -EINVAL;
1968 }
1969
1970 return 0;
1971}
1972
1973static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
1974{
1975 switch (mlx5_mode) {
1976 case MLX5_INLINE_MODE_NONE:
1977 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
1978 break;
1979 case MLX5_INLINE_MODE_L2:
1980 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
1981 break;
1982 case MLX5_INLINE_MODE_IP:
1983 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
1984 break;
1985 case MLX5_INLINE_MODE_TCP_UDP:
1986 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
1987 break;
1988 default:
1989 return -EINVAL;
1990 }
1991
1992 return 0;
1993}
1994
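/* Common sanity checks for the devlink eswitch callbacks: the port must be
 * Ethernet, the function must be an eswitch manager, and the eswitch must
 * already be enabled (or the device must be an ECPF eswitch manager).
 */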
9d1cef19 1995static int mlx5_devlink_eswitch_check(struct devlink *devlink)
feae9087 1996{
9d1cef19 1997 struct mlx5_core_dev *dev = devlink_priv(devlink);
c930a3ad 1998
9d1cef19
OG
1999 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
2000 return -EOPNOTSUPP;
c930a3ad 2001
733d3e54
OG
2002	if (!MLX5_ESWITCH_MANAGER(dev))
2003 return -EPERM;
c930a3ad 2004
c96692fb
BW
2005 if (dev->priv.eswitch->mode == SRIOV_NONE &&
2006 !mlx5_core_is_ecpf_esw_manager(dev))
c930a3ad
OG
2007 return -EOPNOTSUPP;
2008
9d1cef19
OG
2009 return 0;
2010}
2011
db7ff19e
EB
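/* devlink eswitch mode set callback: translate the requested mode and, if
 * it differs from the current one, restart the eswitch in offloads or
 * legacy mode.
 */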
2012int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
2013 struct netlink_ext_ack *extack)
9d1cef19
OG
2014{
2015 struct mlx5_core_dev *dev = devlink_priv(devlink);
2016 u16 cur_mlx5_mode, mlx5_mode = 0;
2017 int err;
2018
2019 err = mlx5_devlink_eswitch_check(devlink);
2020 if (err)
2021 return err;
2022
2023 cur_mlx5_mode = dev->priv.eswitch->mode;
2024
ef78618b 2025 if (esw_mode_from_devlink(mode, &mlx5_mode))
c930a3ad
OG
2026 return -EINVAL;
2027
2028 if (cur_mlx5_mode == mlx5_mode)
2029 return 0;
2030
2031 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
db7ff19e 2032 return esw_offloads_start(dev->priv.eswitch, extack);
c930a3ad 2033 else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
db7ff19e 2034 return esw_offloads_stop(dev->priv.eswitch, extack);
c930a3ad
OG
2035 else
2036 return -EINVAL;
feae9087
OG
2037}
2038
2039int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
2040{
9d1cef19
OG
2041 struct mlx5_core_dev *dev = devlink_priv(devlink);
2042 int err;
c930a3ad 2043
9d1cef19
OG
2044 err = mlx5_devlink_eswitch_check(devlink);
2045 if (err)
2046 return err;
c930a3ad 2047
ef78618b 2048 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
feae9087 2049}
127ea380 2050
db7ff19e
EB
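/* devlink inline-mode set callback. Changing the mode is only supported
 * when the device configures the minimum inline per vport context; it is
 * refused while offloaded flows exist. The new mode is applied to each
 * enabled vport and rolled back if any vport fails.
 */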
2051int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
2052 struct netlink_ext_ack *extack)
bffaa916
RD
2053{
2054 struct mlx5_core_dev *dev = devlink_priv(devlink);
2055 struct mlx5_eswitch *esw = dev->priv.eswitch;
c415f704 2056 int err, vport;
bffaa916
RD
2057 u8 mlx5_mode;
2058
9d1cef19
OG
2059 err = mlx5_devlink_eswitch_check(devlink);
2060 if (err)
2061 return err;
bffaa916 2062
c415f704
OG
2063 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2064 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2065 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
2066 return 0;
2067 /* fall through */
2068 case MLX5_CAP_INLINE_MODE_L2:
8c98ee77 2069 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
bffaa916 2070 return -EOPNOTSUPP;
c415f704
OG
2071 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2072 break;
2073 }
bffaa916 2074
375f51e2 2075 if (esw->offloads.num_flows > 0) {
8c98ee77
EB
2076 NL_SET_ERR_MSG_MOD(extack,
2077 "Can't set inline mode when flows are configured");
375f51e2
RD
2078 return -EOPNOTSUPP;
2079 }
2080
bffaa916
RD
2081 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
2082 if (err)
2083 goto out;
2084
9d1cef19 2085 for (vport = 1; vport < esw->enabled_vports; vport++) {
bffaa916
RD
2086 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
2087 if (err) {
8c98ee77
EB
2088 NL_SET_ERR_MSG_MOD(extack,
2089 "Failed to set min inline on vport");
bffaa916
RD
2090 goto revert_inline_mode;
2091 }
2092 }
2093
2094 esw->offloads.inline_mode = mlx5_mode;
2095 return 0;
2096
2097revert_inline_mode:
2098 while (--vport > 0)
2099 mlx5_modify_nic_vport_min_inline(dev,
2100 vport,
2101 esw->offloads.inline_mode);
2102out:
2103 return err;
2104}
2105
2106int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
2107{
2108 struct mlx5_core_dev *dev = devlink_priv(devlink);
2109 struct mlx5_eswitch *esw = dev->priv.eswitch;
9d1cef19 2110 int err;
bffaa916 2111
9d1cef19
OG
2112 err = mlx5_devlink_eswitch_check(devlink);
2113 if (err)
2114 return err;
bffaa916 2115
bffaa916
RD
2116 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
2117}
2118
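/* Report the effective minimum inline mode. When the device uses per-vport
 * configuration, all VF vports must agree on the same mode, otherwise
 * -EINVAL is returned.
 */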
2119int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
2120{
c415f704 2121 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
bffaa916
RD
2122 struct mlx5_core_dev *dev = esw->dev;
2123 int vport;
bffaa916
RD
2124
2125 if (!MLX5_CAP_GEN(dev, vport_group_manager))
2126 return -EOPNOTSUPP;
2127
2128 if (esw->mode == SRIOV_NONE)
2129 return -EOPNOTSUPP;
2130
c415f704
OG
2131 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2132 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2133 mlx5_mode = MLX5_INLINE_MODE_NONE;
2134 goto out;
2135 case MLX5_CAP_INLINE_MODE_L2:
2136 mlx5_mode = MLX5_INLINE_MODE_L2;
2137 goto out;
2138 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2139 goto query_vports;
2140 }
bffaa916 2141
c415f704 2142query_vports:
bffaa916
RD
2143 for (vport = 1; vport <= nvfs; vport++) {
2144 mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
2145 if (vport > 1 && prev_mlx5_mode != mlx5_mode)
2146 return -EINVAL;
2147 prev_mlx5_mode = mlx5_mode;
2148 }
2149
c415f704 2150out:
bffaa916
RD
2151 *mode = mlx5_mode;
2152 return 0;
2153}
2154
db7ff19e
EB
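/* devlink encap set callback. Changing encapsulation support requires
 * re-creating the fast-path FDB tables, so it is refused while offloaded
 * flows exist; in legacy mode only the cached value is updated.
 */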
2155int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
2156 struct netlink_ext_ack *extack)
7768d197
RD
2157{
2158 struct mlx5_core_dev *dev = devlink_priv(devlink);
2159 struct mlx5_eswitch *esw = dev->priv.eswitch;
2160 int err;
2161
9d1cef19
OG
2162 err = mlx5_devlink_eswitch_check(devlink);
2163 if (err)
2164 return err;
7768d197
RD
2165
2166 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
60786f09 2167 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
7768d197
RD
2168 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
2169 return -EOPNOTSUPP;
2170
2171 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
2172 return -EOPNOTSUPP;
2173
2174 if (esw->mode == SRIOV_LEGACY) {
2175 esw->offloads.encap = encap;
2176 return 0;
2177 }
2178
2179 if (esw->offloads.encap == encap)
2180 return 0;
2181
2182 if (esw->offloads.num_flows > 0) {
8c98ee77
EB
2183 NL_SET_ERR_MSG_MOD(extack,
2184 "Can't set encapsulation when flows are configured");
7768d197
RD
2185 return -EOPNOTSUPP;
2186 }
2187
e52c2802 2188 esw_destroy_offloads_fdb_tables(esw);
7768d197
RD
2189
2190 esw->offloads.encap = encap;
e52c2802
PB
2191
2192 err = esw_create_offloads_fdb_tables(esw, esw->nvports);
2193
7768d197 2194 if (err) {
8c98ee77
EB
2195 NL_SET_ERR_MSG_MOD(extack,
2196 "Failed re-creating fast FDB table");
7768d197 2197 esw->offloads.encap = !encap;
e52c2802 2198 (void)esw_create_offloads_fdb_tables(esw, esw->nvports);
7768d197 2199 }
e52c2802 2200
7768d197
RD
2201 return err;
2202}
2203
2204int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
2205{
2206 struct mlx5_core_dev *dev = devlink_priv(devlink);
2207 struct mlx5_eswitch *esw = dev->priv.eswitch;
9d1cef19 2208 int err;
7768d197 2209
9d1cef19
OG
2210 err = mlx5_devlink_eswitch_check(devlink);
2211 if (err)
2212 return err;
7768d197
RD
2213
2214 *encap = esw->offloads.encap;
2215 return 0;
2216}
2217
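/* Record the representor ops for the given rep type and mark every rep as
 * REGISTERED. The representors themselves are loaded when the eswitch
 * enters offloads mode (or later, when VFs appear).
 */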
f8e8fa02 2218void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
8693115a 2219 const struct mlx5_eswitch_rep_ops *ops,
f8e8fa02 2220 u8 rep_type)
127ea380 2221{
8693115a 2222 struct mlx5_eswitch_rep_data *rep_data;
f8e8fa02
BW
2223 struct mlx5_eswitch_rep *rep;
2224 int i;
9deb2241 2225
8693115a 2226 esw->offloads.rep_ops[rep_type] = ops;
f8e8fa02 2227 mlx5_esw_for_all_reps(esw, i, rep) {
8693115a
PP
2228 rep_data = &rep->rep_data[rep_type];
2229 atomic_set(&rep_data->state, REP_REGISTERED);
f8e8fa02 2230 }
127ea380 2231}
f8e8fa02 2232EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
127ea380 2233
f8e8fa02 2234void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
127ea380 2235{
f8e8fa02 2236 u16 max_vf = mlx5_core_max_vfs(esw->dev);
cb67b832 2237 struct mlx5_eswitch_rep *rep;
f8e8fa02 2238 int i;
cb67b832 2239
f8e8fa02
BW
2240 if (esw->mode == SRIOV_OFFLOADS)
2241 __unload_reps_all_vport(esw, max_vf, rep_type);
127ea380 2242
f8e8fa02 2243 mlx5_esw_for_all_reps(esw, i, rep)
8693115a 2244 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
127ea380 2245}
f8e8fa02 2246EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
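/* Illustrative sketch (not part of this driver): a representor consumer is
 * expected to provide an mlx5_eswitch_rep_ops instance and register it for
 * its rep type. The my_rep_* callbacks below are hypothetical, and the ops
 * layout assumes the load/unload/get_proto_dev callbacks used by the mlx5
 * representor code.
 *
 *	static const struct mlx5_eswitch_rep_ops my_rep_ops = {
 *		.load		= my_rep_load,
 *		.unload		= my_rep_unload,
 *		.get_proto_dev	= my_rep_get_proto_dev,
 *	};
 *
 *	mlx5_eswitch_register_vport_reps(esw, &my_rep_ops, REP_ETH);
 *	...
 *	mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
 */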
726293f1 2247
a4b97ab4 2248void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
726293f1 2249{
726293f1
HHZ
2250 struct mlx5_eswitch_rep *rep;
2251
879c8f84 2252 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
8693115a 2253 return rep->rep_data[rep_type].priv;
726293f1 2254}
22215908
MB
2255
2256void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
2257 int vport,
2258 u8 rep_type)
2259{
22215908
MB
2260 struct mlx5_eswitch_rep *rep;
2261
879c8f84 2262 rep = mlx5_eswitch_get_rep(esw, vport);
22215908 2263
8693115a
PP
2264 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2265 esw->offloads.rep_ops[rep_type]->get_proto_dev)
2266 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
22215908
MB
2267 return NULL;
2268}
57cbd893 2269EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
22215908
MB
2270
2271void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
2272{
879c8f84 2273 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
22215908 2274}
57cbd893
MB
2275EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
2276
2277struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
2278 int vport)
2279{
879c8f84 2280 return mlx5_eswitch_get_rep(esw, vport);
57cbd893
MB
2281}
2282EXPORT_SYMBOL(mlx5_eswitch_vport_rep);