69697b6e
OG
1/*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/etherdevice.h>
34#include <linux/mlx5/driver.h>
35#include <linux/mlx5/mlx5_ifc.h>
36#include <linux/mlx5/vport.h>
37#include <linux/mlx5/fs.h>
38#include "mlx5_core.h"
39#include "eswitch.h"
80f09dfc 40#include "rdma.h"
e52c2802
PB
41#include "en.h"
42#include "fs_core.h"
ac004b83 43#include "lib/devcom.h"
a3888f33
BW
44#include "ecpf.h"
45#include "lib/eq.h"
69697b6e 46
cd7e4186
BW
47/* There are two match-all miss flows, one for unicast dst mac and
48 * one for multicast.
49 */
50#define MLX5_ESW_MISS_FLOWS (2)
51
e52c2802
PB
52#define fdb_prio_table(esw, chain, prio, level) \
53 (esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]
54
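/* fdb_prio_table() indexes the offloads FDB table cache by (chain, prio,
 * level). Each entry holds the lazily created flow table and a rule
 * refcount, managed by esw_get_prio_table()/esw_put_prio_table() below.
 */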
c9b99abc
BW
55#define UPLINK_REP_INDEX 0
56
879c8f84
BW
57/* The rep getter/iterator are only valid after esw->total_vports
58 * and vport->vport are initialized in mlx5_eswitch_init.
59 */
60#define mlx5_esw_for_all_reps(esw, i, rep) \
61 for ((i) = MLX5_VPORT_PF; \
62 (rep) = &(esw)->offloads.vport_reps[i], \
63 (i) < (esw)->total_vports; (i)++)
64
65#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \
66 for ((i) = MLX5_VPORT_FIRST_VF; \
67 (rep) = &(esw)->offloads.vport_reps[i], \
68 (i) <= (nvfs); (i)++)
69
70#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \
71 for ((i) = (nvfs); \
72 (rep) = &(esw)->offloads.vport_reps[i], \
73 (i) >= MLX5_VPORT_FIRST_VF; (i)--)
74
75#define mlx5_esw_for_each_vf_vport(esw, vport, nvfs) \
76 for ((vport) = MLX5_VPORT_FIRST_VF; \
77 (vport) <= (nvfs); (vport)++)
78
79#define mlx5_esw_for_each_vf_vport_reverse(esw, vport, nvfs) \
80 for ((vport) = (nvfs); \
81 (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
82
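/* The VF iterators above walk vport/rep indexes from MLX5_VPORT_FIRST_VF
 * up to the given number of VFs (inclusive), forward or in reverse; the
 * reverse variants are used to unwind partially completed setups on error.
 */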
83static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
84 u16 vport_num)
85{
5ae51620 86 u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
879c8f84
BW
87
88 WARN_ON(idx > esw->total_vports - 1);
89 return &esw->offloads.vport_reps[idx];
90}
91
e52c2802
PB
92static struct mlx5_flow_table *
93esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
94static void
95esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
96
97bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
98{
99 return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
100}
101
102u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
103{
104 if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
105 return FDB_MAX_CHAIN;
106
107 return 0;
108}
109
110u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
111{
112 if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
113 return FDB_MAX_PRIO;
114
bf07aa73 115 return 1;
e52c2802
PB
116}
117
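/* When ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED is set in fdb_table.flags, the
 * offloads FDB exposes up to FDB_MAX_CHAIN chains and FDB_MAX_PRIO
 * priorities; otherwise only chain 0 with a single priority is available.
 */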
74491de9 118struct mlx5_flow_handle *
3d80d1a2
OG
119mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
120 struct mlx5_flow_spec *spec,
776b12b6 121 struct mlx5_esw_flow_attr *attr)
3d80d1a2 122{
592d3651 123 struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
42f7ad67 124 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
e85e02ba 125 bool split = !!(attr->split_count);
74491de9 126 struct mlx5_flow_handle *rule;
e52c2802 127 struct mlx5_flow_table *fdb;
592d3651 128 int j, i = 0;
3d80d1a2
OG
129 void *misc;
130
131 if (esw->mode != SRIOV_OFFLOADS)
132 return ERR_PTR(-EOPNOTSUPP);
133
6acfbf38
OG
134 flow_act.action = attr->action;
135 /* if per flow vlan pop/push is emulated, don't set that into the firmware */
cc495188 136 if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
6acfbf38
OG
137 flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
138 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
139 else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
1482bd3d
JL
140 flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
141 flow_act.vlan[0].vid = attr->vlan_vid[0];
142 flow_act.vlan[0].prio = attr->vlan_prio[0];
cc495188
JL
143 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
144 flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
145 flow_act.vlan[1].vid = attr->vlan_vid[1];
146 flow_act.vlan[1].prio = attr->vlan_prio[1];
147 }
6acfbf38 148 }
776b12b6 149
66958ed9 150 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
e52c2802
PB
151 if (attr->dest_chain) {
152 struct mlx5_flow_table *ft;
153
154 ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
155 if (IS_ERR(ft)) {
156 rule = ERR_CAST(ft);
157 goto err_create_goto_table;
158 }
159
160 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
161 dest[i].ft = ft;
592d3651 162 i++;
e52c2802 163 } else {
e85e02ba 164 for (j = attr->split_count; j < attr->out_count; j++) {
e52c2802 165 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
df65a573 166 dest[i].vport.num = attr->dests[j].rep->vport;
e52c2802 167 dest[i].vport.vhca_id =
df65a573 168 MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
aa39c2c0
EB
169 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
170 dest[i].vport.flags |=
171 MLX5_FLOW_DEST_VPORT_VHCA_ID;
f493f155
EB
172 if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
173 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
8c4dc42b 174 flow_act.reformat_id = attr->dests[j].encap_id;
a18e879d 175 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
8c4dc42b
EB
176 dest[i].vport.reformat_id =
177 attr->dests[j].encap_id;
f493f155 178 }
e52c2802
PB
179 i++;
180 }
56e858df 181 }
e37a79e5 182 }
66958ed9 183 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
e37a79e5 184 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
171c7625 185 dest[i].counter_id = mlx5_fc_id(attr->counter);
e37a79e5 186 i++;
3d80d1a2
OG
187 }
188
189 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
776b12b6 190 MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
3d80d1a2 191
10ff5359
SK
192 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
193 MLX5_SET(fte_match_set_misc, misc,
194 source_eswitch_owner_vhca_id,
195 MLX5_CAP_GEN(attr->in_mdev, vhca_id));
196
3d80d1a2
OG
197 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
198 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
10ff5359
SK
199 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
200 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
201 source_eswitch_owner_vhca_id);
3d80d1a2 202
6363651d
OG
203 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
204 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
205 if (attr->tunnel_match_level != MLX5_MATCH_NONE)
206 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
207 if (attr->match_level != MLX5_MATCH_NONE)
208 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
209 } else if (attr->match_level != MLX5_MATCH_NONE) {
210 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
211 }
3d80d1a2 212
aa24670e 213 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
d7e75a32
OG
214 flow_act.modify_id = attr->mod_hdr_id;
215
e85e02ba 216 fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
e52c2802
PB
217 if (IS_ERR(fdb)) {
218 rule = ERR_CAST(fdb);
219 goto err_esw_get;
220 }
221
222 rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
3d80d1a2 223 if (IS_ERR(rule))
e52c2802 224 goto err_add_rule;
375f51e2
RD
225 else
226 esw->offloads.num_flows++;
3d80d1a2 227
e52c2802
PB
228 return rule;
229
230err_add_rule:
e85e02ba 231 esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
e52c2802
PB
232err_esw_get:
233 if (attr->dest_chain)
234 esw_put_prio_table(esw, attr->dest_chain, 1, 0);
235err_create_goto_table:
aa0cbbae 236 return rule;
3d80d1a2
OG
237}
238
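/* mlx5_eswitch_add_offloaded_rule() above builds the destination array
 * (either a goto-chain flow table or the vport destinations from
 * split_count onward, plus an optional flow counter), matches on the
 * source vport (and the source vhca_id on merged eswitches), then takes
 * a reference on the chain/prio FDB table and installs the rule there.
 * The references are dropped again on any failure path.
 */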
e4ad91f2
CM
239struct mlx5_flow_handle *
240mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
241 struct mlx5_flow_spec *spec,
242 struct mlx5_esw_flow_attr *attr)
243{
244 struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
42f7ad67 245 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
e52c2802
PB
246 struct mlx5_flow_table *fast_fdb;
247 struct mlx5_flow_table *fwd_fdb;
e4ad91f2
CM
248 struct mlx5_flow_handle *rule;
249 void *misc;
250 int i;
251
e52c2802
PB
252 fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
253 if (IS_ERR(fast_fdb)) {
254 rule = ERR_CAST(fast_fdb);
255 goto err_get_fast;
256 }
257
258 fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
259 if (IS_ERR(fwd_fdb)) {
260 rule = ERR_CAST(fwd_fdb);
261 goto err_get_fwd;
262 }
263
e4ad91f2 264 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
e85e02ba 265 for (i = 0; i < attr->split_count; i++) {
e4ad91f2 266 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
df65a573 267 dest[i].vport.num = attr->dests[i].rep->vport;
e4ad91f2 268 dest[i].vport.vhca_id =
df65a573 269 MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
aa39c2c0
EB
270 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
271 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
1cc26d74
EB
272 if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
273 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
8c4dc42b 274 dest[i].vport.reformat_id = attr->dests[i].encap_id;
1cc26d74 275 }
e4ad91f2
CM
276 }
277 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
e52c2802 278 dest[i].ft = fwd_fdb;
e4ad91f2
CM
279 i++;
280
281 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
282 MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
283
284 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
285 MLX5_SET(fte_match_set_misc, misc,
286 source_eswitch_owner_vhca_id,
287 MLX5_CAP_GEN(attr->in_mdev, vhca_id));
288
289 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
290 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
291 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
292 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
293 source_eswitch_owner_vhca_id);
294
295 if (attr->match_level == MLX5_MATCH_NONE)
296 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
297 else
298 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
299 MLX5_MATCH_MISC_PARAMETERS;
300
e52c2802 301 rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
e4ad91f2 302
e52c2802
PB
303 if (IS_ERR(rule))
304 goto add_err;
e4ad91f2 305
e52c2802
PB
306 esw->offloads.num_flows++;
307
308 return rule;
309add_err:
310 esw_put_prio_table(esw, attr->chain, attr->prio, 1);
311err_get_fwd:
312 esw_put_prio_table(esw, attr->chain, attr->prio, 0);
313err_get_fast:
e4ad91f2
CM
314 return rule;
315}
316
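/* mlx5_eswitch_add_fwd_rule() splits forwarding across two table levels:
 * the rule installed in the level-0 "fast" table sends to the first
 * split_count vport destinations and then jumps to the level-1 "fwd"
 * table of the same chain/prio, where the remaining actions are applied.
 */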
e52c2802
PB
317static void
318__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
319 struct mlx5_flow_handle *rule,
320 struct mlx5_esw_flow_attr *attr,
321 bool fwd_rule)
322{
e85e02ba 323 bool split = (attr->split_count > 0);
e52c2802
PB
324
325 mlx5_del_flow_rules(rule);
326 esw->offloads.num_flows--;
327
328 if (fwd_rule) {
329 esw_put_prio_table(esw, attr->chain, attr->prio, 1);
330 esw_put_prio_table(esw, attr->chain, attr->prio, 0);
331 } else {
e85e02ba 332 esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
e52c2802
PB
333 if (attr->dest_chain)
334 esw_put_prio_table(esw, attr->dest_chain, 1, 0);
335 }
336}
337
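/* __mlx5_eswitch_del_rule() drops the table references taken at rule
 * creation time: both levels for fwd rules, otherwise the chain/prio
 * table (split or not) plus the dest_chain table when one was used.
 */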
d85cdccb
OG
338void
339mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
340 struct mlx5_flow_handle *rule,
341 struct mlx5_esw_flow_attr *attr)
342{
e52c2802 343 __mlx5_eswitch_del_rule(esw, rule, attr, false);
d85cdccb
OG
344}
345
48265006
OG
346void
347mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
348 struct mlx5_flow_handle *rule,
349 struct mlx5_esw_flow_attr *attr)
350{
e52c2802 351 __mlx5_eswitch_del_rule(esw, rule, attr, true);
48265006
OG
352}
353
f5f82476
OG
354static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
355{
356 struct mlx5_eswitch_rep *rep;
357 int vf_vport, err = 0;
358
359 esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
360 for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
361 rep = &esw->offloads.vport_reps[vf_vport];
f121e0ea 362 if (rep->rep_if[REP_ETH].state != REP_LOADED)
f5f82476
OG
363 continue;
364
365 err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
366 if (err)
367 goto out;
368 }
369
370out:
371 return err;
372}
373
374static struct mlx5_eswitch_rep *
375esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
376{
377 struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
378
379 in_rep = attr->in_rep;
df65a573 380 out_rep = attr->dests[0].rep;
f5f82476
OG
381
382 if (push)
383 vport = in_rep;
384 else if (pop)
385 vport = out_rep;
386 else
387 vport = in_rep;
388
389 return vport;
390}
391
392static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
393 bool push, bool pop, bool fwd)
394{
395 struct mlx5_eswitch_rep *in_rep, *out_rep;
396
397 if ((push || pop) && !fwd)
398 goto out_notsupp;
399
400 in_rep = attr->in_rep;
df65a573 401 out_rep = attr->dests[0].rep;
f5f82476 402
b05af6aa 403 if (push && in_rep->vport == MLX5_VPORT_UPLINK)
f5f82476
OG
404 goto out_notsupp;
405
b05af6aa 406 if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
f5f82476
OG
407 goto out_notsupp;
408
 409 /* vport has vlan push configured, can't offload VF --> wire rules without it */
410 if (!push && !pop && fwd)
b05af6aa 411 if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
f5f82476
OG
412 goto out_notsupp;
413
414 /* protects against (1) setting rules with different vlans to push and
 415 * (2) setting rules without vlans (attr->vlan = 0) && with vlans to push (!= 0)
416 */
1482bd3d 417 if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
f5f82476
OG
418 goto out_notsupp;
419
420 return 0;
421
422out_notsupp:
9eb78923 423 return -EOPNOTSUPP;
f5f82476
OG
424}
425
426int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
427 struct mlx5_esw_flow_attr *attr)
428{
429 struct offloads_fdb *offloads = &esw->fdb_table.offloads;
430 struct mlx5_eswitch_rep *vport = NULL;
431 bool push, pop, fwd;
432 int err = 0;
433
6acfbf38 434 /* no-op if vlan push/pop is supported natively (non-emulated mode) */
cc495188 435 if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
6acfbf38
OG
436 return 0;
437
f5f82476
OG
438 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
439 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
e52c2802
PB
440 fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
441 !attr->dest_chain);
f5f82476
OG
442
443 err = esw_add_vlan_action_check(attr, push, pop, fwd);
444 if (err)
445 return err;
446
447 attr->vlan_handled = false;
448
449 vport = esw_vlan_action_get_vport(attr, push, pop);
450
451 if (!push && !pop && fwd) {
452 /* tracks VF --> wire rules without vlan push action */
b05af6aa 453 if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
f5f82476
OG
454 vport->vlan_refcount++;
455 attr->vlan_handled = true;
456 }
457
458 return 0;
459 }
460
461 if (!push && !pop)
462 return 0;
463
464 if (!(offloads->vlan_push_pop_refcount)) {
465 /* it's the 1st vlan rule, apply global vlan pop policy */
466 err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
467 if (err)
468 goto out;
469 }
470 offloads->vlan_push_pop_refcount++;
471
472 if (push) {
473 if (vport->vlan_refcount)
474 goto skip_set_push;
475
1482bd3d 476 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
f5f82476
OG
477 SET_VLAN_INSERT | SET_VLAN_STRIP);
478 if (err)
479 goto out;
1482bd3d 480 vport->vlan = attr->vlan_vid[0];
f5f82476
OG
481skip_set_push:
482 vport->vlan_refcount++;
483 }
484out:
485 if (!err)
486 attr->vlan_handled = true;
487 return err;
488}
489
490int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
491 struct mlx5_esw_flow_attr *attr)
492{
493 struct offloads_fdb *offloads = &esw->fdb_table.offloads;
494 struct mlx5_eswitch_rep *vport = NULL;
495 bool push, pop, fwd;
496 int err = 0;
497
6acfbf38 498 /* no-op if vlan push/pop is supported natively (non-emulated mode) */
cc495188 499 if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
6acfbf38
OG
500 return 0;
501
f5f82476
OG
502 if (!attr->vlan_handled)
503 return 0;
504
505 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
506 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
507 fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
508
509 vport = esw_vlan_action_get_vport(attr, push, pop);
510
511 if (!push && !pop && fwd) {
512 /* tracks VF --> wire rules without vlan push action */
b05af6aa 513 if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
f5f82476
OG
514 vport->vlan_refcount--;
515
516 return 0;
517 }
518
519 if (push) {
520 vport->vlan_refcount--;
521 if (vport->vlan_refcount)
522 goto skip_unset_push;
523
524 vport->vlan = 0;
525 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
526 0, 0, SET_VLAN_STRIP);
527 if (err)
528 goto out;
529 }
530
531skip_unset_push:
532 offloads->vlan_push_pop_refcount--;
533 if (offloads->vlan_push_pop_refcount)
534 return 0;
535
536 /* no more vlan rules, stop global vlan pop policy */
537 err = esw_set_global_vlan_pop(esw, 0);
538
539out:
540 return err;
541}
542
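/* The vlan add/del action helpers above emulate per-flow vlan push/pop
 * when the firmware cannot offload it: the first vlan rule switches all
 * loaded reps to SET_VLAN_STRIP via esw_set_global_vlan_pop(), the
 * per-rep vlan_refcount tracks push users, and the global policy is
 * reverted once vlan_push_pop_refcount drops back to zero.
 */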
f7a68945 543struct mlx5_flow_handle *
ab22be9b
OG
544mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
545{
66958ed9 546 struct mlx5_flow_act flow_act = {0};
4c5009c5 547 struct mlx5_flow_destination dest = {};
74491de9 548 struct mlx5_flow_handle *flow_rule;
c5bb1730 549 struct mlx5_flow_spec *spec;
ab22be9b
OG
550 void *misc;
551
1b9a07ee 552 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
c5bb1730 553 if (!spec) {
ab22be9b
OG
554 flow_rule = ERR_PTR(-ENOMEM);
555 goto out;
556 }
557
c5bb1730 558 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
ab22be9b 559 MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
a1b3839a
BW
560 /* source vport is the esw manager */
561 MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);
ab22be9b 562
c5bb1730 563 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
ab22be9b
OG
564 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
565 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
566
c5bb1730 567 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
ab22be9b 568 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
b17f7fc1 569 dest.vport.num = vport;
66958ed9 570 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
ab22be9b 571
52fff327 572 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
66958ed9 573 &flow_act, &dest, 1);
ab22be9b
OG
574 if (IS_ERR(flow_rule))
575 esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
576out:
c5bb1730 577 kvfree(spec);
ab22be9b
OG
578 return flow_rule;
579}
57cbd893 580EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
ab22be9b 581
159fe639
MB
582void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
583{
584 mlx5_del_flow_rules(rule);
585}
586
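/* The send-to-vport rule matches on the SQ number and the eswitch
 * manager source port in the slow path FDB and forwards the packet to
 * the requested vport; it is removed with a plain mlx5_del_flow_rules().
 */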
ac004b83
RD
587static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
588 struct mlx5_flow_spec *spec,
589 struct mlx5_flow_destination *dest)
590{
591 void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
592 misc_parameters);
593
594 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
595 MLX5_CAP_GEN(peer_dev, vhca_id));
596
597 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
598
599 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
600 misc_parameters);
601 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
602 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
603 source_eswitch_owner_vhca_id);
604
605 dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
a1b3839a 606 dest->vport.num = peer_dev->priv.eswitch->manager_vport;
ac004b83 607 dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
04de7dda 608 dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
ac004b83
RD
609}
610
611static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
612 struct mlx5_core_dev *peer_dev)
613{
614 struct mlx5_flow_destination dest = {};
615 struct mlx5_flow_act flow_act = {0};
616 struct mlx5_flow_handle **flows;
617 struct mlx5_flow_handle *flow;
618 struct mlx5_flow_spec *spec;
619 /* total vports is the same for both e-switches */
620 int nvports = esw->total_vports;
621 void *misc;
622 int err, i;
623
624 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
625 if (!spec)
626 return -ENOMEM;
627
628 peer_miss_rules_setup(peer_dev, spec, &dest);
629
630 flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
631 if (!flows) {
632 err = -ENOMEM;
633 goto alloc_flows_err;
634 }
635
636 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
637 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
638 misc_parameters);
639
81cd229c
BW
640 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
641 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_PF);
642 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
643 spec, &flow_act, &dest, 1);
644 if (IS_ERR(flow)) {
645 err = PTR_ERR(flow);
646 goto add_pf_flow_err;
647 }
648 flows[MLX5_VPORT_PF] = flow;
649 }
650
651 if (mlx5_ecpf_vport_exists(esw->dev)) {
652 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
653 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
654 spec, &flow_act, &dest, 1);
655 if (IS_ERR(flow)) {
656 err = PTR_ERR(flow);
657 goto add_ecpf_flow_err;
658 }
659 flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
660 }
661
879c8f84 662 mlx5_esw_for_each_vf_vport(esw, i, mlx5_core_max_vfs(esw->dev)) {
ac004b83
RD
663 MLX5_SET(fte_match_set_misc, misc, source_port, i);
664 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
665 spec, &flow_act, &dest, 1);
666 if (IS_ERR(flow)) {
667 err = PTR_ERR(flow);
81cd229c 668 goto add_vf_flow_err;
ac004b83
RD
669 }
670 flows[i] = flow;
671 }
672
673 esw->fdb_table.offloads.peer_miss_rules = flows;
674
675 kvfree(spec);
676 return 0;
677
81cd229c 678add_vf_flow_err:
879c8f84
BW
679 nvports = --i;
680 mlx5_esw_for_each_vf_vport_reverse(esw, i, nvports)
ac004b83 681 mlx5_del_flow_rules(flows[i]);
81cd229c
BW
682
683 if (mlx5_ecpf_vport_exists(esw->dev))
684 mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
685add_ecpf_flow_err:
686 if (mlx5_core_is_ecpf_esw_manager(esw->dev))
687 mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
688add_pf_flow_err:
689 esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
ac004b83
RD
690 kvfree(flows);
691alloc_flows_err:
692 kvfree(spec);
693 return err;
694}
695
696static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
697{
698 struct mlx5_flow_handle **flows;
699 int i;
700
701 flows = esw->fdb_table.offloads.peer_miss_rules;
702
879c8f84 703 mlx5_esw_for_each_vf_vport_reverse(esw, i, mlx5_core_max_vfs(esw->dev))
ac004b83
RD
704 mlx5_del_flow_rules(flows[i]);
705
81cd229c
BW
706 if (mlx5_ecpf_vport_exists(esw->dev))
707 mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
708
709 if (mlx5_core_is_ecpf_esw_manager(esw->dev))
710 mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
711
ac004b83
RD
712 kvfree(flows);
713}
714
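/* Peer miss rules: one rule per vport (PF and ECPF when they exist, then
 * each VF), matching traffic whose misc source_port together with the
 * peer's source_eswitch_owner_vhca_id identifies the peer device, and
 * forwarding it to the peer's eswitch manager vport.
 */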
3aa33572
OG
715static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
716{
66958ed9 717 struct mlx5_flow_act flow_act = {0};
4c5009c5 718 struct mlx5_flow_destination dest = {};
74491de9 719 struct mlx5_flow_handle *flow_rule = NULL;
c5bb1730 720 struct mlx5_flow_spec *spec;
f80be543
MB
721 void *headers_c;
722 void *headers_v;
3aa33572 723 int err = 0;
f80be543
MB
724 u8 *dmac_c;
725 u8 *dmac_v;
3aa33572 726
1b9a07ee 727 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
c5bb1730 728 if (!spec) {
3aa33572
OG
729 err = -ENOMEM;
730 goto out;
731 }
732
f80be543
MB
733 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
734 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
735 outer_headers);
736 dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
737 outer_headers.dmac_47_16);
738 dmac_c[0] = 0x01;
739
3aa33572 740 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
a1b3839a 741 dest.vport.num = esw->manager_vport;
66958ed9 742 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3aa33572 743
52fff327 744 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
66958ed9 745 &flow_act, &dest, 1);
3aa33572
OG
746 if (IS_ERR(flow_rule)) {
747 err = PTR_ERR(flow_rule);
f80be543 748 esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
3aa33572
OG
749 goto out;
750 }
751
f80be543
MB
752 esw->fdb_table.offloads.miss_rule_uni = flow_rule;
753
754 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
755 outer_headers);
756 dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
757 outer_headers.dmac_47_16);
758 dmac_v[0] = 0x01;
52fff327 759 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
f80be543
MB
760 &flow_act, &dest, 1);
761 if (IS_ERR(flow_rule)) {
762 err = PTR_ERR(flow_rule);
763 esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
764 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
765 goto out;
766 }
767
768 esw->fdb_table.offloads.miss_rule_multi = flow_rule;
769
3aa33572 770out:
c5bb1730 771 kvfree(spec);
3aa33572
OG
772 return err;
773}
774
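/* Two miss rules are installed in the slow path FDB, both matching only
 * the multicast bit of the destination MAC (dmac_c[0] = 0x01): the first,
 * with the bit clear, catches unknown unicast, and the second, with
 * dmac_v[0] = 0x01, catches multicast; both forward to the eswitch
 * manager vport. This is what MLX5_ESW_MISS_FLOWS accounts for.
 */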
1033665e 775#define ESW_OFFLOADS_NUM_GROUPS 4
69697b6e 776
e52c2802
PB
 777/* Firmware currently supports 4 pool sizes (ESW_POOLS) and a virtual
 778 * memory region of 16M (ESW_SIZE); this region is duplicated for each
 779 * flow table pool. We can allocate up to 16M from each pool, and we
 780 * keep track of how much has been used via put/get_sz_to_pool.
 781 * Firmware doesn't report any of this for now.
 782 * ESW_POOLS is expected to be sorted from large to small.
783 */
784#define ESW_SIZE (16 * 1024 * 1024)
785const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
786 64 * 1024, 4 * 1024 };
787
788static int
789get_sz_from_pool(struct mlx5_eswitch *esw)
790{
791 int sz = 0, i;
792
793 for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
794 if (esw->fdb_table.offloads.fdb_left[i]) {
795 --esw->fdb_table.offloads.fdb_left[i];
796 sz = ESW_POOLS[i];
797 break;
798 }
799 }
800
801 return sz;
802}
803
804static void
805put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
806{
807 int i;
808
809 for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
810 if (sz >= ESW_POOLS[i]) {
811 ++esw->fdb_table.offloads.fdb_left[i];
812 break;
813 }
814 }
815}
816
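/* Pool accounting example: fdb_left[i] starts at ESW_SIZE / ESW_POOLS[i]
 * for every pool size that fits within the firmware's log_max_ft_size,
 * so with the 4M pool at most 16M / 4M = 4 such tables may be
 * outstanding. get_sz_from_pool() hands out the largest size that still
 * has budget; put_sz_to_pool() returns the budget to the first pool
 * whose size is not larger than the freed table.
 */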
817static struct mlx5_flow_table *
818create_next_size_table(struct mlx5_eswitch *esw,
819 struct mlx5_flow_namespace *ns,
820 u16 table_prio,
821 int level,
822 u32 flags)
823{
824 struct mlx5_flow_table *fdb;
825 int sz;
826
827 sz = get_sz_from_pool(esw);
828 if (!sz)
829 return ERR_PTR(-ENOSPC);
830
831 fdb = mlx5_create_auto_grouped_flow_table(ns,
832 table_prio,
833 sz,
834 ESW_OFFLOADS_NUM_GROUPS,
835 level,
836 flags);
837 if (IS_ERR(fdb)) {
838 esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
839 (int)PTR_ERR(fdb), table_prio, level, sz);
840 put_sz_to_pool(esw, sz);
841 }
842
843 return fdb;
844}
845
846static struct mlx5_flow_table *
847esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
69697b6e 848{
69697b6e 849 struct mlx5_core_dev *dev = esw->dev;
69697b6e 850 struct mlx5_flow_table *fdb = NULL;
e52c2802
PB
851 struct mlx5_flow_namespace *ns;
852 int table_prio, l = 0;
bbd00f7e 853 u32 flags = 0;
69697b6e 854
c92a0b94
PB
855 if (chain == FDB_SLOW_PATH_CHAIN)
856 return esw->fdb_table.offloads.slow_fdb;
857
e52c2802 858 mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
264d7bf3 859
e52c2802
PB
860 fdb = fdb_prio_table(esw, chain, prio, level).fdb;
861 if (fdb) {
862 /* take ref on earlier levels as well */
863 while (level >= 0)
864 fdb_prio_table(esw, chain, prio, level--).num_rules++;
865 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
866 return fdb;
867 }
69697b6e 868
e52c2802
PB
869 ns = mlx5_get_fdb_sub_ns(dev, chain);
870 if (!ns) {
871 esw_warn(dev, "Failed to get FDB sub namespace\n");
872 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
873 return ERR_PTR(-EOPNOTSUPP);
874 }
a842dd04 875
7768d197 876 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
60786f09 877 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
61444b45 878 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
bbd00f7e 879
e52c2802 880 table_prio = (chain * FDB_MAX_PRIO) + prio - 1;
69697b6e 881
e52c2802
PB
882 /* create earlier levels for correct fs_core lookup when
883 * connecting tables
884 */
885 for (l = 0; l <= level; l++) {
886 if (fdb_prio_table(esw, chain, prio, l).fdb) {
887 fdb_prio_table(esw, chain, prio, l).num_rules++;
888 continue;
889 }
a842dd04 890
e52c2802
PB
891 fdb = create_next_size_table(esw, ns, table_prio, l, flags);
892 if (IS_ERR(fdb)) {
893 l--;
894 goto err_create_fdb;
895 }
896
897 fdb_prio_table(esw, chain, prio, l).fdb = fdb;
898 fdb_prio_table(esw, chain, prio, l).num_rules = 1;
a842dd04 899 }
a842dd04 900
e52c2802
PB
901 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
902 return fdb;
a842dd04 903
e52c2802
PB
904err_create_fdb:
905 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
906 if (l >= 0)
907 esw_put_prio_table(esw, chain, prio, l);
908
909 return fdb;
1967ce6e
OG
910}
911
e52c2802
PB
912static void
913esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
1967ce6e 914{
e52c2802
PB
915 int l;
916
c92a0b94
PB
917 if (chain == FDB_SLOW_PATH_CHAIN)
918 return;
919
e52c2802
PB
920 mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
921
922 for (l = level; l >= 0; l--) {
923 if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
924 continue;
925
926 put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
927 mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
928 fdb_prio_table(esw, chain, prio, l).fdb = NULL;
929 }
930
931 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
932}
933
934static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
935{
936 /* If lazy creation isn't supported, deref the fast path tables */
937 if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
938 esw_put_prio_table(esw, 0, 1, 1);
939 esw_put_prio_table(esw, 0, 1, 0);
940 }
1967ce6e
OG
941}
942
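/* esw_get_prio_table() returns the cached table for (chain, prio, level)
 * if it exists, bumping num_rules on that level and all lower ones;
 * otherwise it creates any missing level as an auto-grouped table sized
 * from the pool, creating lower levels first so fs_core can connect
 * them. esw_put_prio_table() drops the references and destroys any level
 * whose count reaches zero. The slow path chain bypasses this machinery
 * and always uses slow_fdb.
 */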
943#define MAX_PF_SQ 256
cd3d07e7 944#define MAX_SQ_NVPORTS 32
1967ce6e
OG
945
946static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
947{
948 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
949 struct mlx5_flow_table_attr ft_attr = {};
950 struct mlx5_core_dev *dev = esw->dev;
e52c2802 951 u32 *flow_group_in, max_flow_counter;
1967ce6e
OG
952 struct mlx5_flow_namespace *root_ns;
953 struct mlx5_flow_table *fdb = NULL;
e52c2802 954 int table_size, ix, err = 0, i;
1967ce6e 955 struct mlx5_flow_group *g;
e52c2802 956 u32 flags = 0, fdb_max;
1967ce6e 957 void *match_criteria;
f80be543 958 u8 *dmac;
1967ce6e
OG
959
960 esw_debug(esw->dev, "Create offloads FDB Tables\n");
1b9a07ee 961 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1967ce6e
OG
962 if (!flow_group_in)
963 return -ENOMEM;
964
965 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
966 if (!root_ns) {
967 esw_warn(dev, "Failed to get FDB flow namespace\n");
968 err = -EOPNOTSUPP;
969 goto ns_err;
970 }
971
e52c2802
PB
972 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
973 MLX5_CAP_GEN(dev, max_flow_counter_15_0);
974 fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
975
976 esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
977 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
978 max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
979 fdb_max);
980
981 for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
982 esw->fdb_table.offloads.fdb_left[i] =
983 ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
1967ce6e 984
cd7e4186
BW
985 table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
986 MLX5_ESW_MISS_FLOWS + esw->total_vports;
b3ba5149 987
e52c2802
PB
988 /* create the slow path fdb with encap set, so further table instances
989 * can be created at run time while VFs are probed if the FW allows that.
990 */
991 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
992 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
993 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
994
995 ft_attr.flags = flags;
b3ba5149
ES
996 ft_attr.max_fte = table_size;
997 ft_attr.prio = FDB_SLOW_PATH;
998
999 fdb = mlx5_create_flow_table(root_ns, &ft_attr);
1033665e
OG
1000 if (IS_ERR(fdb)) {
1001 err = PTR_ERR(fdb);
1002 esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
1003 goto slow_fdb_err;
1004 }
52fff327 1005 esw->fdb_table.offloads.slow_fdb = fdb;
1033665e 1006
e52c2802
PB
1007 /* If lazy creation isn't supported, open the fast path tables now */
1008 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
1009 esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
1010 esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
1011 esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
1012 esw_get_prio_table(esw, 0, 1, 0);
1013 esw_get_prio_table(esw, 0, 1, 1);
1014 } else {
1015 esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
1016 esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
1017 }
1018
69697b6e
OG
1019 /* create send-to-vport group */
1020 memset(flow_group_in, 0, inlen);
1021 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1022 MLX5_MATCH_MISC_PARAMETERS);
1023
1024 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1025
1026 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
1027 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
1028
cd3d07e7 1029 ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
69697b6e
OG
1030 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1031 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
1032
1033 g = mlx5_create_flow_group(fdb, flow_group_in);
1034 if (IS_ERR(g)) {
1035 err = PTR_ERR(g);
1036 esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
1037 goto send_vport_err;
1038 }
1039 esw->fdb_table.offloads.send_to_vport_grp = g;
1040
ac004b83
RD
1041 /* create peer esw miss group */
1042 memset(flow_group_in, 0, inlen);
1043 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1044 MLX5_MATCH_MISC_PARAMETERS);
1045
1046 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1047 match_criteria);
1048
1049 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1050 misc_parameters.source_port);
1051 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1052 misc_parameters.source_eswitch_owner_vhca_id);
1053
1054 MLX5_SET(create_flow_group_in, flow_group_in,
1055 source_eswitch_owner_vhca_id_valid, 1);
1056 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1057 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1058 ix + esw->total_vports - 1);
1059 ix += esw->total_vports;
1060
1061 g = mlx5_create_flow_group(fdb, flow_group_in);
1062 if (IS_ERR(g)) {
1063 err = PTR_ERR(g);
1064 esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
1065 goto peer_miss_err;
1066 }
1067 esw->fdb_table.offloads.peer_miss_grp = g;
1068
69697b6e
OG
1069 /* create miss group */
1070 memset(flow_group_in, 0, inlen);
f80be543
MB
1071 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1072 MLX5_MATCH_OUTER_HEADERS);
1073 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1074 match_criteria);
1075 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
1076 outer_headers.dmac_47_16);
1077 dmac[0] = 0x01;
69697b6e
OG
1078
1079 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
cd7e4186
BW
1080 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1081 ix + MLX5_ESW_MISS_FLOWS);
69697b6e
OG
1082
1083 g = mlx5_create_flow_group(fdb, flow_group_in);
1084 if (IS_ERR(g)) {
1085 err = PTR_ERR(g);
1086 esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
1087 goto miss_err;
1088 }
1089 esw->fdb_table.offloads.miss_grp = g;
1090
3aa33572
OG
1091 err = esw_add_fdb_miss_rule(esw);
1092 if (err)
1093 goto miss_rule_err;
1094
e52c2802 1095 esw->nvports = nvports;
c88a026e 1096 kvfree(flow_group_in);
69697b6e
OG
1097 return 0;
1098
3aa33572
OG
1099miss_rule_err:
1100 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
69697b6e 1101miss_err:
ac004b83
RD
1102 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1103peer_miss_err:
69697b6e
OG
1104 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1105send_vport_err:
e52c2802 1106 esw_destroy_offloads_fast_fdb_tables(esw);
52fff327 1107 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
1033665e 1108slow_fdb_err:
69697b6e
OG
1109ns_err:
1110 kvfree(flow_group_in);
1111 return err;
1112}
1113
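/* Slow path FDB layout created above: a send-to-vport group sized
 * nvports * MAX_SQ_NVPORTS + MAX_PF_SQ, a peer miss group with one entry
 * per vport of the eswitch, and a miss group for the unicast and
 * multicast miss flows (MLX5_ESW_MISS_FLOWS). When multi_fdb_encap is
 * not supported while encap is enabled, the chain 0 fast path tables are
 * opened eagerly instead of lazily.
 */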
1967ce6e 1114static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
69697b6e 1115{
e52c2802 1116 if (!esw->fdb_table.offloads.slow_fdb)
69697b6e
OG
1117 return;
1118
1967ce6e 1119 esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
f80be543
MB
1120 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
1121 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
69697b6e 1122 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
ac004b83 1123 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
69697b6e
OG
1124 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1125
52fff327 1126 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
e52c2802 1127 esw_destroy_offloads_fast_fdb_tables(esw);
69697b6e 1128}
c116c6ee 1129
cd7e4186 1130static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
c116c6ee 1131{
b3ba5149 1132 struct mlx5_flow_table_attr ft_attr = {};
c116c6ee 1133 struct mlx5_core_dev *dev = esw->dev;
b3ba5149
ES
1134 struct mlx5_flow_table *ft_offloads;
1135 struct mlx5_flow_namespace *ns;
c116c6ee
OG
1136 int err = 0;
1137
1138 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
1139 if (!ns) {
1140 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
eff596da 1141 return -EOPNOTSUPP;
c116c6ee
OG
1142 }
1143
cd7e4186 1144 ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
b3ba5149
ES
1145
1146 ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
c116c6ee
OG
1147 if (IS_ERR(ft_offloads)) {
1148 err = PTR_ERR(ft_offloads);
1149 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
1150 return err;
1151 }
1152
1153 esw->offloads.ft_offloads = ft_offloads;
1154 return 0;
1155}
1156
1157static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
1158{
1159 struct mlx5_esw_offload *offloads = &esw->offloads;
1160
1161 mlx5_destroy_flow_table(offloads->ft_offloads);
1162}
fed9ce22 1163
cd7e4186 1164static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
fed9ce22
OG
1165{
1166 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1167 struct mlx5_flow_group *g;
fed9ce22
OG
1168 u32 *flow_group_in;
1169 void *match_criteria, *misc;
1170 int err = 0;
fed9ce22 1171
cd7e4186 1172 nvports = nvports + MLX5_ESW_MISS_FLOWS;
1b9a07ee 1173 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
fed9ce22
OG
1174 if (!flow_group_in)
1175 return -ENOMEM;
1176
1177 /* create vport rx group */
1178 memset(flow_group_in, 0, inlen);
1179 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1180 MLX5_MATCH_MISC_PARAMETERS);
1181
1182 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1183 misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
1184 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
1185
1186 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1187 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
1188
1189 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
1190
1191 if (IS_ERR(g)) {
1192 err = PTR_ERR(g);
1193 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
1194 goto out;
1195 }
1196
1197 esw->offloads.vport_rx_group = g;
1198out:
e574978a 1199 kvfree(flow_group_in);
fed9ce22
OG
1200 return err;
1201}
1202
1203static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
1204{
1205 mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
1206}
1207
74491de9 1208struct mlx5_flow_handle *
c966f7d5
GT
1209mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
1210 struct mlx5_flow_destination *dest)
fed9ce22 1211{
66958ed9 1212 struct mlx5_flow_act flow_act = {0};
74491de9 1213 struct mlx5_flow_handle *flow_rule;
c5bb1730 1214 struct mlx5_flow_spec *spec;
fed9ce22
OG
1215 void *misc;
1216
1b9a07ee 1217 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
c5bb1730 1218 if (!spec) {
fed9ce22
OG
1219 flow_rule = ERR_PTR(-ENOMEM);
1220 goto out;
1221 }
1222
c5bb1730 1223 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
fed9ce22
OG
1224 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
1225
c5bb1730 1226 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
fed9ce22
OG
1227 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
1228
c5bb1730 1229 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
fed9ce22 1230
66958ed9 1231 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
74491de9 1232 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
c966f7d5 1233 &flow_act, dest, 1);
fed9ce22
OG
1234 if (IS_ERR(flow_rule)) {
1235 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
1236 goto out;
1237 }
1238
1239out:
c5bb1730 1240 kvfree(spec);
fed9ce22
OG
1241 return flow_rule;
1242}
feae9087 1243
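/* The vport rx rule matches misc source_port == vport in the offloads
 * table and forwards the packet to the caller-supplied destination.
 */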
db7ff19e
EB
1244static int esw_offloads_start(struct mlx5_eswitch *esw,
1245 struct netlink_ext_ack *extack)
c930a3ad 1246{
6c419ba8 1247 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
c930a3ad 1248
c96692fb
BW
1249 if (esw->mode != SRIOV_LEGACY &&
1250 !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
8c98ee77
EB
1251 NL_SET_ERR_MSG_MOD(extack,
1252 "Can't set offloads mode, SRIOV legacy not enabled");
c930a3ad
OG
1253 return -EINVAL;
1254 }
1255
1256 mlx5_eswitch_disable_sriov(esw);
1257 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
6c419ba8 1258 if (err) {
8c98ee77
EB
1259 NL_SET_ERR_MSG_MOD(extack,
1260 "Failed setting eswitch to offloads");
6c419ba8 1261 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
8c98ee77
EB
1262 if (err1) {
1263 NL_SET_ERR_MSG_MOD(extack,
1264 "Failed setting eswitch back to legacy");
1265 }
6c419ba8 1266 }
bffaa916
RD
1267 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
1268 if (mlx5_eswitch_inline_mode_get(esw,
1269 num_vfs,
1270 &esw->offloads.inline_mode)) {
1271 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
8c98ee77
EB
1272 NL_SET_ERR_MSG_MOD(extack,
1273 "Inline mode is different between vports");
bffaa916
RD
1274 }
1275 }
c930a3ad
OG
1276 return err;
1277}
1278
e8d31c4d
MB
1279void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
1280{
1281 kfree(esw->offloads.vport_reps);
1282}
1283
1284int esw_offloads_init_reps(struct mlx5_eswitch *esw)
1285{
2aca1787 1286 int total_vports = MLX5_TOTAL_VPORTS(esw->dev);
e8d31c4d 1287 struct mlx5_core_dev *dev = esw->dev;
e8d31c4d 1288 struct mlx5_eswitch_rep *rep;
f121e0ea 1289 u8 hw_id[ETH_ALEN], rep_type;
e8d31c4d
MB
1290 int vport;
1291
2aca1787 1292 esw->offloads.vport_reps = kcalloc(total_vports,
e8d31c4d
MB
1293 sizeof(struct mlx5_eswitch_rep),
1294 GFP_KERNEL);
1295 if (!esw->offloads.vport_reps)
1296 return -ENOMEM;
1297
e8d31c4d
MB
1298 mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
1299
879c8f84 1300 mlx5_esw_for_all_reps(esw, vport, rep) {
5ae51620 1301 rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport);
e8d31c4d 1302 ether_addr_copy(rep->hw_id, hw_id);
f121e0ea
BW
1303
1304 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
1305 rep->rep_if[rep_type].state = REP_UNREGISTERED;
e8d31c4d
MB
1306 }
1307
e8d31c4d
MB
1308 return 0;
1309}
1310
c9b99abc
BW
1311static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
1312 struct mlx5_eswitch_rep *rep, u8 rep_type)
1313{
f121e0ea 1314 if (rep->rep_if[rep_type].state != REP_LOADED)
c9b99abc
BW
1315 return;
1316
1317 rep->rep_if[rep_type].unload(rep);
f121e0ea 1318 rep->rep_if[rep_type].state = REP_REGISTERED;
c9b99abc
BW
1319}
1320
29d9fd7d 1321static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
6ed1803a
MB
1322{
1323 struct mlx5_eswitch_rep *rep;
c9b99abc 1324
81cd229c
BW
1325 if (mlx5_ecpf_vport_exists(esw->dev)) {
1326 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
1327 __esw_offloads_unload_rep(esw, rep, rep_type);
1328 }
1329
1330 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1331 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
1332 __esw_offloads_unload_rep(esw, rep, rep_type);
1333 }
1334
879c8f84 1335 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
c9b99abc 1336 __esw_offloads_unload_rep(esw, rep, rep_type);
6ed1803a
MB
1337}
1338
29d9fd7d
BW
1339static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
1340 u8 rep_type)
1341{
1342 struct mlx5_eswitch_rep *rep;
1343 int i;
1344
1345 mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
1346 __esw_offloads_unload_rep(esw, rep, rep_type);
1347}
1348
1349static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
1350{
1351 u8 rep_type = NUM_REP_TYPES;
1352
1353 while (rep_type-- > 0)
1354 __unload_reps_vf_vport(esw, nvports, rep_type);
1355}
1356
1357static void __unload_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
1358 u8 rep_type)
1359{
1360 __unload_reps_vf_vport(esw, nvports, rep_type);
1361
1362 /* Special vports must be the last to unload. */
1363 __unload_reps_special_vport(esw, rep_type);
1364}
1365
1366static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw, int nvports)
a4b97ab4
MB
1367{
1368 u8 rep_type = NUM_REP_TYPES;
1369
1370 while (rep_type-- > 0)
29d9fd7d 1371 __unload_reps_all_vport(esw, nvports, rep_type);
a4b97ab4
MB
1372}
1373
c9b99abc
BW
1374static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
1375 struct mlx5_eswitch_rep *rep, u8 rep_type)
1376{
f121e0ea
BW
1377 int err = 0;
1378
1379 if (rep->rep_if[rep_type].state != REP_REGISTERED)
c9b99abc
BW
1380 return 0;
1381
f121e0ea
BW
1382 err = rep->rep_if[rep_type].load(esw->dev, rep);
1383 if (err)
1384 return err;
1385
1386 rep->rep_if[rep_type].state = REP_LOADED;
1387
1388 return 0;
c9b99abc
BW
1389}
1390
29d9fd7d 1391static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
c930a3ad 1392{
cb67b832 1393 struct mlx5_eswitch_rep *rep;
c930a3ad
OG
1394 int err;
1395
879c8f84 1396 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
c9b99abc 1397 err = __esw_offloads_load_rep(esw, rep, rep_type);
81cd229c
BW
1398 if (err)
1399 return err;
1400
1401 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1402 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
1403 err = __esw_offloads_load_rep(esw, rep, rep_type);
1404 if (err)
1405 goto err_pf;
1406 }
1407
1408 if (mlx5_ecpf_vport_exists(esw->dev)) {
1409 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
1410 err = __esw_offloads_load_rep(esw, rep, rep_type);
1411 if (err)
1412 goto err_ecpf;
1413 }
1414
1415 return 0;
1416
1417err_ecpf:
1418 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1419 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
1420 __esw_offloads_unload_rep(esw, rep, rep_type);
1421 }
1422
1423err_pf:
1424 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
1425 __esw_offloads_unload_rep(esw, rep, rep_type);
29d9fd7d
BW
1426 return err;
1427}
6ed1803a 1428
29d9fd7d
BW
1429static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
1430 u8 rep_type)
1431{
1432 struct mlx5_eswitch_rep *rep;
1433 int err, i;
1434
1435 mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
c9b99abc 1436 err = __esw_offloads_load_rep(esw, rep, rep_type);
6ed1803a 1437 if (err)
29d9fd7d 1438 goto err_vf;
6ed1803a
MB
1439 }
1440
1441 return 0;
1442
29d9fd7d
BW
1443err_vf:
1444 __unload_reps_vf_vport(esw, --i, rep_type);
1445 return err;
1446}
1447
1448static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
1449{
1450 u8 rep_type = 0;
1451 int err;
1452
1453 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
1454 err = __load_reps_vf_vport(esw, nvports, rep_type);
1455 if (err)
1456 goto err_reps;
1457 }
1458
1459 return err;
1460
6ed1803a 1461err_reps:
29d9fd7d
BW
1462 while (rep_type-- > 0)
1463 __unload_reps_vf_vport(esw, nvports, rep_type);
1464 return err;
1465}
1466
1467static int __load_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
1468 u8 rep_type)
1469{
1470 int err;
1471
1472 /* Special vports must be loaded first. */
1473 err = __load_reps_special_vport(esw, rep_type);
1474 if (err)
1475 return err;
1476
1477 err = __load_reps_vf_vport(esw, nvports, rep_type);
1478 if (err)
1479 goto err_vfs;
1480
1481 return 0;
1482
1483err_vfs:
1484 __unload_reps_special_vport(esw, rep_type);
a4b97ab4
MB
1485 return err;
1486}
1487
29d9fd7d 1488static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw, int nvports)
a4b97ab4
MB
1489{
1490 u8 rep_type = 0;
1491 int err;
1492
1493 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
29d9fd7d 1494 err = __load_reps_all_vport(esw, nvports, rep_type);
a4b97ab4
MB
1495 if (err)
1496 goto err_reps;
1497 }
1498
1499 return err;
1500
1501err_reps:
1502 while (rep_type-- > 0)
29d9fd7d 1503 __unload_reps_all_vport(esw, nvports, rep_type);
6ed1803a
MB
1504 return err;
1505}
1506
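/* Representor load order: the special vports (uplink, then PF and ECPF
 * when present) are loaded before the VF representors, and unloading
 * walks the same sets in reverse; each rep type (e.g. REP_ETH) is fully
 * loaded for all vports before moving on to the next type.
 */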
ac004b83
RD
1507#define ESW_OFFLOADS_DEVCOM_PAIR (0)
1508#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
1509
1510static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
1511 struct mlx5_eswitch *peer_esw)
1512{
1513 int err;
1514
1515 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
1516 if (err)
1517 return err;
1518
1519 return 0;
1520}
1521
1522static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
1523{
04de7dda 1524 mlx5e_tc_clean_fdb_peer_flows(esw);
ac004b83
RD
1525 esw_del_fdb_peer_miss_rules(esw);
1526}
1527
1528static int mlx5_esw_offloads_devcom_event(int event,
1529 void *my_data,
1530 void *event_data)
1531{
1532 struct mlx5_eswitch *esw = my_data;
1533 struct mlx5_eswitch *peer_esw = event_data;
1534 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1535 int err;
1536
1537 switch (event) {
1538 case ESW_OFFLOADS_DEVCOM_PAIR:
1539 err = mlx5_esw_offloads_pair(esw, peer_esw);
1540 if (err)
1541 goto err_out;
1542
1543 err = mlx5_esw_offloads_pair(peer_esw, esw);
1544 if (err)
1545 goto err_pair;
1546
1547 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
1548 break;
1549
1550 case ESW_OFFLOADS_DEVCOM_UNPAIR:
1551 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
1552 break;
1553
1554 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
1555 mlx5_esw_offloads_unpair(peer_esw);
1556 mlx5_esw_offloads_unpair(esw);
1557 break;
1558 }
1559
1560 return 0;
1561
1562err_pair:
1563 mlx5_esw_offloads_unpair(esw);
1564
1565err_out:
1566 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
1567 event, err);
1568 return err;
1569}
1570
1571static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
1572{
1573 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1574
04de7dda
RD
1575 INIT_LIST_HEAD(&esw->offloads.peer_flows);
1576 mutex_init(&esw->offloads.peer_mutex);
1577
ac004b83
RD
1578 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1579 return;
1580
1581 mlx5_devcom_register_component(devcom,
1582 MLX5_DEVCOM_ESW_OFFLOADS,
1583 mlx5_esw_offloads_devcom_event,
1584 esw);
1585
1586 mlx5_devcom_send_event(devcom,
1587 MLX5_DEVCOM_ESW_OFFLOADS,
1588 ESW_OFFLOADS_DEVCOM_PAIR, esw);
1589}
1590
1591static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
1592{
1593 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1594
1595 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1596 return;
1597
1598 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
1599 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
1600
1601 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1602}
1603
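/* Devcom pairing: when both eswitches of a merged-eswitch pair report
 * in, the ESW_OFFLOADS_DEVCOM_PAIR event installs the peer miss rules in
 * each direction and marks the component paired; UNPAIR removes the peer
 * flows and rules again. Nothing is registered when the merged_eswitch
 * capability is absent.
 */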
18486737
EB
1604static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
1605 struct mlx5_vport *vport)
1606{
1607 struct mlx5_core_dev *dev = esw->dev;
1608 struct mlx5_flow_act flow_act = {0};
1609 struct mlx5_flow_spec *spec;
1610 int err = 0;
1611
 1612 /* For prio tag mode, there is only 1 FTE:
1613 * 1) Untagged packets - push prio tag VLAN, allow
1614 * Unmatched traffic is allowed by default
1615 */
1616
1617 if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
1618 return -EOPNOTSUPP;
1619
1620 esw_vport_cleanup_ingress_rules(esw, vport);
1621
1622 err = esw_vport_enable_ingress_acl(esw, vport);
1623 if (err) {
1624 mlx5_core_warn(esw->dev,
1625 "failed to enable prio tag ingress acl (%d) on vport[%d]\n",
1626 err, vport->vport);
1627 return err;
1628 }
1629
1630 esw_debug(esw->dev,
1631 "vport[%d] configure ingress rules\n", vport->vport);
1632
1633 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1634 if (!spec) {
1635 err = -ENOMEM;
1636 goto out_no_mem;
1637 }
1638
1639 /* Untagged packets - push prio tag VLAN, allow */
1640 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1641 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
1642 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1643 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1644 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1645 flow_act.vlan[0].ethtype = ETH_P_8021Q;
1646 flow_act.vlan[0].vid = 0;
1647 flow_act.vlan[0].prio = 0;
1648 vport->ingress.allow_rule =
1649 mlx5_add_flow_rules(vport->ingress.acl, spec,
1650 &flow_act, NULL, 0);
1651 if (IS_ERR(vport->ingress.allow_rule)) {
1652 err = PTR_ERR(vport->ingress.allow_rule);
1653 esw_warn(esw->dev,
1654 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
1655 vport->vport, err);
1656 vport->ingress.allow_rule = NULL;
1657 goto out;
1658 }
1659
1660out:
1661 kvfree(spec);
1662out_no_mem:
1663 if (err)
1664 esw_vport_cleanup_ingress_rules(esw, vport);
1665 return err;
1666}
1667
1668static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
1669 struct mlx5_vport *vport)
1670{
1671 struct mlx5_flow_act flow_act = {0};
1672 struct mlx5_flow_spec *spec;
1673 int err = 0;
1674
 1675 /* For prio tag mode, there is only 1 FTE:
1676 * 1) prio tag packets - pop the prio tag VLAN, allow
1677 * Unmatched traffic is allowed by default
1678 */
1679
1680 esw_vport_cleanup_egress_rules(esw, vport);
1681
1682 err = esw_vport_enable_egress_acl(esw, vport);
1683 if (err) {
1684 mlx5_core_warn(esw->dev,
1685 "failed to enable egress acl (%d) on vport[%d]\n",
1686 err, vport->vport);
1687 return err;
1688 }
1689
1690 esw_debug(esw->dev,
1691 "vport[%d] configure prio tag egress rules\n", vport->vport);
1692
1693 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1694 if (!spec) {
1695 err = -ENOMEM;
1696 goto out_no_mem;
1697 }
1698
1699 /* prio tag vlan rule - pop it so VF receives untagged packets */
1700 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1701 MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
1702 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
1703 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);
1704
1705 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1706 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1707 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1708 vport->egress.allowed_vlan =
1709 mlx5_add_flow_rules(vport->egress.acl, spec,
1710 &flow_act, NULL, 0);
1711 if (IS_ERR(vport->egress.allowed_vlan)) {
1712 err = PTR_ERR(vport->egress.allowed_vlan);
1713 esw_warn(esw->dev,
1714 "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
1715 vport->vport, err);
1716 vport->egress.allowed_vlan = NULL;
1717 goto out;
1718 }
1719
1720out:
1721 kvfree(spec);
1722out_no_mem:
1723 if (err)
1724 esw_vport_cleanup_egress_rules(esw, vport);
1725 return err;
1726}
1727
1728static int esw_prio_tag_acls_config(struct mlx5_eswitch *esw, int nvports)
1729{
1730 int i, j;
1731 int err;
1732
1733 mlx5_esw_for_each_vf_vport(esw, i, nvports) {
1734 err = esw_vport_ingress_prio_tag_config(esw, &esw->vports[i]);
1735 if (err)
1736 goto err_ingress;
1737 err = esw_vport_egress_prio_tag_config(esw, &esw->vports[i]);
1738 if (err)
1739 goto err_egress;
1740 }
1741
1742 return 0;
1743
1744err_egress:
1745 esw_vport_disable_ingress_acl(esw, &esw->vports[i]);
1746err_ingress:
1747 mlx5_esw_for_each_vf_vport_reverse(esw, j, i - 1) {
1748 esw_vport_disable_egress_acl(esw, &esw->vports[j]);
1749 esw_vport_disable_ingress_acl(esw, &esw->vports[j]);
1750 }
1751
1752 return err;
1753}
1754
1755static void esw_prio_tag_acls_cleanup(struct mlx5_eswitch *esw)
1756{
1757 int i;
1758
1759 mlx5_esw_for_each_vf_vport(esw, i, esw->nvports) {
1760 esw_vport_disable_egress_acl(esw, &esw->vports[i]);
1761 esw_vport_disable_ingress_acl(esw, &esw->vports[i]);
1762 }
1763}
1764
eca8cc38 1765static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
6ed1803a
MB
1766{
1767 int err;
1768
5c1d260e 1769 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
e52c2802
PB
1770 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
1771
18486737
EB
1772 if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
1773 err = esw_prio_tag_acls_config(esw, nvports);
1774 if (err)
1775 return err;
1776 }
1777
1967ce6e 1778 err = esw_create_offloads_fdb_tables(esw, nvports);
c930a3ad 1779 if (err)
c5447c70 1780 return err;
c930a3ad 1781
cd7e4186 1782 err = esw_create_offloads_table(esw, nvports);
c930a3ad
OG
1783 if (err)
1784 goto create_ft_err;
1785
cd7e4186 1786 err = esw_create_vport_rx_group(esw, nvports);
c930a3ad
OG
1787 if (err)
1788 goto create_fg_err;
1789
1790 return 0;
1791
1792create_fg_err:
1793 esw_destroy_offloads_table(esw);
1794
1795create_ft_err:
1967ce6e 1796 esw_destroy_offloads_fdb_tables(esw);
5bae8c03 1797
c930a3ad
OG
1798 return err;
1799}
1800
eca8cc38
BW
1801static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
1802{
1803 esw_destroy_vport_rx_group(esw);
1804 esw_destroy_offloads_table(esw);
1805 esw_destroy_offloads_fdb_tables(esw);
18486737
EB
1806 if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
1807 esw_prio_tag_acls_cleanup(esw);
eca8cc38
BW
1808}
1809
a3888f33
BW
1810static void esw_host_params_event_handler(struct work_struct *work)
1811{
1812 struct mlx5_host_work *host_work;
1813 struct mlx5_eswitch *esw;
1814 int err, num_vf = 0;
1815
1816 host_work = container_of(work, struct mlx5_host_work, work);
1817 esw = host_work->esw;
1818
1819 err = mlx5_query_host_params_num_vfs(esw->dev, &num_vf);
1820 if (err || num_vf == esw->host_info.num_vfs)
1821 goto out;
1822
1823 /* Number of VFs can only change from "0 to x" or "x to 0". */
1824 if (esw->host_info.num_vfs > 0) {
1825 esw_offloads_unload_vf_reps(esw, esw->host_info.num_vfs);
1826 } else {
1827 err = esw_offloads_load_vf_reps(esw, num_vf);
1828
1829 if (err)
1830 goto out;
1831 }
1832
1833 esw->host_info.num_vfs = num_vf;
1834
1835out:
1836 kfree(host_work);
1837}
1838
1839static int esw_host_params_event(struct notifier_block *nb,
1840 unsigned long type, void *data)
1841{
1842 struct mlx5_host_work *host_work;
1843 struct mlx5_host_info *host_info;
1844 struct mlx5_eswitch *esw;
1845
1846 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
1847 if (!host_work)
1848 return NOTIFY_DONE;
1849
1850 host_info = mlx5_nb_cof(nb, struct mlx5_host_info, nb);
1851 esw = container_of(host_info, struct mlx5_eswitch, host_info);
1852
1853 host_work->esw = esw;
1854
1855 INIT_WORK(&host_work->work, esw_host_params_event_handler);
1856 queue_work(esw->work_queue, &host_work->work);
1857
1858 return NOTIFY_OK;
1859}
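/* Note on the two functions above (descriptive only): the HOST_PARAMS_CHANGE
 * notifier can run in atomic context (hence the GFP_ATOMIC allocation), so
 * the rep load/unload work - which may sleep - is deferred to the eswitch
 * work queue and performed in esw_host_params_event_handler().
 */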
1860
c9b99abc
BW
1861int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
1862 int total_nvports)
eca8cc38
BW
1863{
1864 int err;
1865
c9b99abc 1866 err = esw_offloads_steering_init(esw, total_nvports);
eca8cc38
BW
1867 if (err)
1868 return err;
1869
29d9fd7d 1870 err = esw_offloads_load_all_reps(esw, vf_nvports);
eca8cc38
BW
1871 if (err)
1872 goto err_reps;
1873
1874 esw_offloads_devcom_init(esw);
a3888f33
BW
1875
1876 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1877 MLX5_NB_INIT(&esw->host_info.nb, esw_host_params_event,
1878 HOST_PARAMS_CHANGE);
1879 mlx5_eq_notifier_register(esw->dev, &esw->host_info.nb);
1880 esw->host_info.num_vfs = vf_nvports;
1881 }
1882
80f09dfc
MG
1883 mlx5_rdma_enable_roce(esw->dev);
1884
eca8cc38
BW
1885 return 0;
1886
1887err_reps:
1888 esw_offloads_steering_cleanup(esw);
1889 return err;
1890}
1891
db7ff19e
EB
1892static int esw_offloads_stop(struct mlx5_eswitch *esw,
1893 struct netlink_ext_ack *extack)
c930a3ad 1894{
6c419ba8 1895 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
c930a3ad
OG
1896
1897 mlx5_eswitch_disable_sriov(esw);
1898 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
6c419ba8 1899 if (err) {
8c98ee77 1900 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
6c419ba8 1901 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
8c98ee77
EB
1902 if (err1) {
1903 NL_SET_ERR_MSG_MOD(extack,
1904 "Failed setting eswitch back to offloads");
1905 }
6c419ba8 1906 }
c930a3ad
OG
1907
1908 return err;
1909}
1910
c9b99abc 1911void esw_offloads_cleanup(struct mlx5_eswitch *esw)
c930a3ad 1912{
a3888f33
BW
1913 u16 num_vfs;
1914
1915 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1916 mlx5_eq_notifier_unregister(esw->dev, &esw->host_info.nb);
1917 flush_workqueue(esw->work_queue);
1918 num_vfs = esw->host_info.num_vfs;
1919 } else {
1920 num_vfs = esw->dev->priv.sriov.num_vfs;
1921 }
c9b99abc 1922
80f09dfc 1923 mlx5_rdma_disable_roce(esw->dev);
ac004b83 1924 esw_offloads_devcom_cleanup(esw);
29d9fd7d 1925 esw_offloads_unload_all_reps(esw, num_vfs);
eca8cc38 1926 esw_offloads_steering_cleanup(esw);
c930a3ad
OG
1927}
1928
ef78618b 1929static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
c930a3ad
OG
1930{
1931 switch (mode) {
1932 case DEVLINK_ESWITCH_MODE_LEGACY:
1933 *mlx5_mode = SRIOV_LEGACY;
1934 break;
1935 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
1936 *mlx5_mode = SRIOV_OFFLOADS;
1937 break;
1938 default:
1939 return -EINVAL;
1940 }
1941
1942 return 0;
1943}
1944
ef78618b
OG
1945static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
1946{
1947 switch (mlx5_mode) {
1948 case SRIOV_LEGACY:
1949 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
1950 break;
1951 case SRIOV_OFFLOADS:
1952 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
1953 break;
1954 default:
1955 return -EINVAL;
1956 }
1957
1958 return 0;
1959}
1960
bffaa916
RD
1961static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
1962{
1963 switch (mode) {
1964 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
1965 *mlx5_mode = MLX5_INLINE_MODE_NONE;
1966 break;
1967 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
1968 *mlx5_mode = MLX5_INLINE_MODE_L2;
1969 break;
1970 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
1971 *mlx5_mode = MLX5_INLINE_MODE_IP;
1972 break;
1973 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
1974 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
1975 break;
1976 default:
1977 return -EINVAL;
1978 }
1979
1980 return 0;
1981}
1982
1983static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
1984{
1985 switch (mlx5_mode) {
1986 case MLX5_INLINE_MODE_NONE:
1987 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
1988 break;
1989 case MLX5_INLINE_MODE_L2:
1990 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
1991 break;
1992 case MLX5_INLINE_MODE_IP:
1993 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
1994 break;
1995 case MLX5_INLINE_MODE_TCP_UDP:
1996 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
1997 break;
1998 default:
1999 return -EINVAL;
2000 }
2001
2002 return 0;
2003}
2004
9d1cef19 2005static int mlx5_devlink_eswitch_check(struct devlink *devlink)
feae9087 2006{
9d1cef19 2007 struct mlx5_core_dev *dev = devlink_priv(devlink);
c930a3ad 2008
9d1cef19
OG
2009 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
2010 return -EOPNOTSUPP;
c930a3ad 2011
733d3e54
OG
2012 if (!MLX5_ESWITCH_MANAGER(dev))
2013 return -EPERM;
c930a3ad 2014
c96692fb
BW
2015 if (dev->priv.eswitch->mode == SRIOV_NONE &&
2016 !mlx5_core_is_ecpf_esw_manager(dev))
c930a3ad
OG
2017 return -EOPNOTSUPP;
2018
9d1cef19
OG
2019 return 0;
2020}
2021
db7ff19e
EB
2022int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
2023 struct netlink_ext_ack *extack)
9d1cef19
OG
2024{
2025 struct mlx5_core_dev *dev = devlink_priv(devlink);
2026 u16 cur_mlx5_mode, mlx5_mode = 0;
2027 int err;
2028
2029 err = mlx5_devlink_eswitch_check(devlink);
2030 if (err)
2031 return err;
2032
2033 cur_mlx5_mode = dev->priv.eswitch->mode;
2034
ef78618b 2035 if (esw_mode_from_devlink(mode, &mlx5_mode))
c930a3ad
OG
2036 return -EINVAL;
2037
2038 if (cur_mlx5_mode == mlx5_mode)
2039 return 0;
2040
2041 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
db7ff19e 2042 return esw_offloads_start(dev->priv.eswitch, extack);
c930a3ad 2043 else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
db7ff19e 2044 return esw_offloads_stop(dev->priv.eswitch, extack);
c930a3ad
OG
2045 else
2046 return -EINVAL;
feae9087
OG
2047}
2048
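/* Illustrative user-space usage of the mode set/get callbacks above and
 * below (not part of this file; the PCI address is a placeholder):
 *
 *   devlink dev eswitch set pci/0000:03:00.0 mode switchdev
 *   devlink dev eswitch show pci/0000:03:00.0
 */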
2049int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
2050{
9d1cef19
OG
2051 struct mlx5_core_dev *dev = devlink_priv(devlink);
2052 int err;
c930a3ad 2053
9d1cef19
OG
2054 err = mlx5_devlink_eswitch_check(devlink);
2055 if (err)
2056 return err;
c930a3ad 2057
ef78618b 2058 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
feae9087 2059}
127ea380 2060
db7ff19e
EB
2061int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
2062 struct netlink_ext_ack *extack)
bffaa916
RD
2063{
2064 struct mlx5_core_dev *dev = devlink_priv(devlink);
2065 struct mlx5_eswitch *esw = dev->priv.eswitch;
c415f704 2066 int err, vport;
bffaa916
RD
2067 u8 mlx5_mode;
2068
9d1cef19
OG
2069 err = mlx5_devlink_eswitch_check(devlink);
2070 if (err)
2071 return err;
bffaa916 2072
c415f704
OG
2073 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2074 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2075 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
2076 return 0;
2077 /* fall through */
2078 case MLX5_CAP_INLINE_MODE_L2:
8c98ee77 2079 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
bffaa916 2080 return -EOPNOTSUPP;
c415f704
OG
2081 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2082 break;
2083 }
bffaa916 2084
375f51e2 2085 if (esw->offloads.num_flows > 0) {
8c98ee77
EB
2086 NL_SET_ERR_MSG_MOD(extack,
2087 "Can't set inline mode when flows are configured");
375f51e2
RD
2088 return -EOPNOTSUPP;
2089 }
2090
bffaa916
RD
2091 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
2092 if (err)
2093 goto out;
2094
9d1cef19 2095 for (vport = 1; vport < esw->enabled_vports; vport++) {
bffaa916
RD
2096 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
2097 if (err) {
8c98ee77
EB
2098 NL_SET_ERR_MSG_MOD(extack,
2099 "Failed to set min inline on vport");
bffaa916
RD
2100 goto revert_inline_mode;
2101 }
2102 }
2103
2104 esw->offloads.inline_mode = mlx5_mode;
2105 return 0;
2106
2107revert_inline_mode:
2108 while (--vport > 0)
2109 mlx5_modify_nic_vport_min_inline(dev,
2110 vport,
2111 esw->offloads.inline_mode);
2112out:
2113 return err;
2114}
2115
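/* Illustrative user-space usage of the inline-mode callbacks (placeholder
 * PCI address; the mode keywords map to the devlink converters above):
 *
 *   devlink dev eswitch set pci/0000:03:00.0 inline-mode transport
 */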
2116int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
2117{
2118 struct mlx5_core_dev *dev = devlink_priv(devlink);
2119 struct mlx5_eswitch *esw = dev->priv.eswitch;
9d1cef19 2120 int err;
bffaa916 2121
9d1cef19
OG
2122 err = mlx5_devlink_eswitch_check(devlink);
2123 if (err)
2124 return err;
bffaa916 2125
bffaa916
RD
2126 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
2127}
2128
2129int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
2130{
c415f704 2131 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
bffaa916
RD
2132 struct mlx5_core_dev *dev = esw->dev;
2133 int vport;
bffaa916
RD
2134
2135 if (!MLX5_CAP_GEN(dev, vport_group_manager))
2136 return -EOPNOTSUPP;
2137
2138 if (esw->mode == SRIOV_NONE)
2139 return -EOPNOTSUPP;
2140
c415f704
OG
2141 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2142 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2143 mlx5_mode = MLX5_INLINE_MODE_NONE;
2144 goto out;
2145 case MLX5_CAP_INLINE_MODE_L2:
2146 mlx5_mode = MLX5_INLINE_MODE_L2;
2147 goto out;
2148 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2149 goto query_vports;
2150 }
bffaa916 2151
c415f704 2152query_vports:
bffaa916
RD
2153 for (vport = 1; vport <= nvfs; vport++) {
2154 mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
2155 if (vport > 1 && prev_mlx5_mode != mlx5_mode)
2156 return -EINVAL;
2157 prev_mlx5_mode = mlx5_mode;
2158 }
2159
c415f704 2160out:
bffaa916
RD
2161 *mode = mlx5_mode;
2162 return 0;
2163}
2164
db7ff19e
EB
2165int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
2166 struct netlink_ext_ack *extack)
7768d197
RD
2167{
2168 struct mlx5_core_dev *dev = devlink_priv(devlink);
2169 struct mlx5_eswitch *esw = dev->priv.eswitch;
2170 int err;
2171
9d1cef19
OG
2172 err = mlx5_devlink_eswitch_check(devlink);
2173 if (err)
2174 return err;
7768d197
RD
2175
2176 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
60786f09 2177 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
7768d197
RD
2178 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
2179 return -EOPNOTSUPP;
2180
2181 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
2182 return -EOPNOTSUPP;
2183
2184 if (esw->mode == SRIOV_LEGACY) {
2185 esw->offloads.encap = encap;
2186 return 0;
2187 }
2188
2189 if (esw->offloads.encap == encap)
2190 return 0;
2191
2192 if (esw->offloads.num_flows > 0) {
8c98ee77
EB
2193 NL_SET_ERR_MSG_MOD(extack,
2194 "Can't set encapsulation when flows are configured");
7768d197
RD
2195 return -EOPNOTSUPP;
2196 }
2197
e52c2802 2198 esw_destroy_offloads_fdb_tables(esw);
7768d197
RD
2199
2200 esw->offloads.encap = encap;
e52c2802
PB
2201
2202 err = esw_create_offloads_fdb_tables(esw, esw->nvports);
2203
7768d197 2204 if (err) {
8c98ee77
EB
2205 NL_SET_ERR_MSG_MOD(extack,
2206 "Failed re-creating fast FDB table");
7768d197 2207 esw->offloads.encap = !encap;
e52c2802 2208 (void)esw_create_offloads_fdb_tables(esw, esw->nvports);
7768d197 2209 }
e52c2802 2210
7768d197
RD
2211 return err;
2212}
2213
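/* Illustrative user-space usage of the encap callbacks (placeholder PCI
 * address; exact keyword spelling depends on the iproute2 version):
 *
 *   devlink dev eswitch set pci/0000:03:00.0 encap enable
 */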
2214int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
2215{
2216 struct mlx5_core_dev *dev = devlink_priv(devlink);
2217 struct mlx5_eswitch *esw = dev->priv.eswitch;
9d1cef19 2218 int err;
7768d197 2219
9d1cef19
OG
2220 err = mlx5_devlink_eswitch_check(devlink);
2221 if (err)
2222 return err;
7768d197
RD
2223
2224 *encap = esw->offloads.encap;
2225 return 0;
2226}
2227
f8e8fa02
BW
2228void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
2229 struct mlx5_eswitch_rep_if *__rep_if,
2230 u8 rep_type)
127ea380 2231{
a4b97ab4 2232 struct mlx5_eswitch_rep_if *rep_if;
f8e8fa02
BW
2233 struct mlx5_eswitch_rep *rep;
2234 int i;
9deb2241 2235
f8e8fa02
BW
2236 mlx5_esw_for_all_reps(esw, i, rep) {
2237 rep_if = &rep->rep_if[rep_type];
2238 rep_if->load = __rep_if->load;
2239 rep_if->unload = __rep_if->unload;
2240 rep_if->get_proto_dev = __rep_if->get_proto_dev;
2241 rep_if->priv = __rep_if->priv;
127ea380 2242
f8e8fa02
BW
2243 rep_if->state = REP_REGISTERED;
2244 }
127ea380 2245}
f8e8fa02 2246EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
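/* Sketch of how a rep implementation might consume the API above (not part
 * of this file; the my_rep_* callbacks and the priv pointer are hypothetical
 * placeholders, REP_ETH is one of the existing rep types):
 *
 *   struct mlx5_eswitch_rep_if rep_if = {
 *           .load = my_rep_load,
 *           .unload = my_rep_unload,
 *           .get_proto_dev = my_rep_get_proto_dev,
 *           .priv = priv,
 *   };
 *
 *   mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_ETH);
 *   ...
 *   mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
 */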
127ea380 2247
f8e8fa02 2248void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
127ea380 2249{
f8e8fa02 2250 u16 max_vf = mlx5_core_max_vfs(esw->dev);
cb67b832 2251 struct mlx5_eswitch_rep *rep;
f8e8fa02 2252 int i;
cb67b832 2253
f8e8fa02
BW
2254 if (esw->mode == SRIOV_OFFLOADS)
2255 __unload_reps_all_vport(esw, max_vf, rep_type);
127ea380 2256
f8e8fa02
BW
2257 mlx5_esw_for_all_reps(esw, i, rep)
2258 rep->rep_if[rep_type].state = REP_UNREGISTERED;
127ea380 2259}
f8e8fa02 2260EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
726293f1 2261
a4b97ab4 2262void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
726293f1 2263{
726293f1
HHZ
2264 struct mlx5_eswitch_rep *rep;
2265
879c8f84 2266 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
a4b97ab4 2267 return rep->rep_if[rep_type].priv;
726293f1 2268}
22215908
MB
2269
2270void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
2271 int vport,
2272 u8 rep_type)
2273{
22215908
MB
2274 struct mlx5_eswitch_rep *rep;
2275
879c8f84 2276 rep = mlx5_eswitch_get_rep(esw, vport);
22215908 2277
f121e0ea 2278 if (rep->rep_if[rep_type].state == REP_LOADED &&
22215908
MB
2279 rep->rep_if[rep_type].get_proto_dev)
2280 return rep->rep_if[rep_type].get_proto_dev(rep);
2281 return NULL;
2282}
57cbd893 2283EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
22215908
MB
2284
2285void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
2286{
879c8f84 2287 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
22215908 2288}
57cbd893
MB
2289EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
2290
2291struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
2292 int vport)
2293{
879c8f84 2294 return mlx5_eswitch_get_rep(esw, vport);
57cbd893
MB
2295}
2296EXPORT_SYMBOL(mlx5_eswitch_vport_rep);