/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "ecpf.h"
#include "lib/eq.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)

#define fdb_prio_table(esw, chain, prio, level) \
	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]

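/* Orientation note: fdb_prio_table() is a plain accessor into a 3-D cache
 * of lazily created fast-path tables, indexed by (chain, prio, level).
 * Each entry carries the table pointer plus a num_rules refcount, and the
 * esw_get_prio_table()/esw_put_prio_table() helpers below create a table on
 * first use and destroy it when its refcount drops to zero. An illustrative
 * lookup (the variable name is ours, not from the original source):
 *
 *	struct mlx5_flow_table *fast_fdb = fdb_prio_table(esw, 0, 1, 0).fdb;
 */
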
#define UPLINK_REP_INDEX 0

/* The rep getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */
#define mlx5_esw_for_all_reps(esw, i, rep)			\
	for ((i) = MLX5_VPORT_PF;				\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) < (esw)->total_vports; (i)++)

#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs)		\
	for ((i) = MLX5_VPORT_FIRST_VF;				\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs)	\
	for ((i) = (nvfs);					\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) >= MLX5_VPORT_FIRST_VF; (i)--)

#define mlx5_esw_for_each_vf_vport(esw, vport, nvfs)		\
	for ((vport) = MLX5_VPORT_FIRST_VF;			\
	     (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_vf_vport_reverse(esw, vport, nvfs)	\
	for ((vport) = (nvfs);					\
	     (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)

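/* Note: the iterators above rely on the C comma operator inside the loop
 * condition, so (rep) is re-pointed at vport_reps[i] on every pass before
 * the bound is checked. Indexing vport_reps[] directly by vport number is
 * valid here because, in this version, PF/VF vport numbers coincide with
 * rep array slots; mlx5_eswitch_get_rep() below goes through
 * mlx5_eswitch_vport_num_to_index() to also cover the special vports.
 */
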
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);

bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
{
	return !!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED);
}

u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_CHAIN;

	return 0;
}

u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_PRIO;

	return 1;
}

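/* Note: when ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED is not set (e.g. the
 * firmware lacks multi-table encap support, see
 * esw_create_offloads_fdb_tables()), the helpers above collapse the offload
 * model to a single chain 0 and one priority, and callers are expected to
 * reject rules outside that range.
 */
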
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;
	void *misc;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per-flow vlan pop/push is emulated, don't program it into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		if (attr->dest_chain) {
			struct mlx5_flow_table *ft;

			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.reformat_id = attr->dests[j].encap_id;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.reformat_id =
						attr->dests[j].encap_id;
				}
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
		if (attr->tunnel_match_level != MLX5_MATCH_NONE)
			spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
		if (attr->match_level != MLX5_MATCH_NONE)
			spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
	} else if (attr->match_level != MLX5_MATCH_NONE) {
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
	if (attr->dest_chain)
		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}

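/* Note on the destination array built above: forwarding destinations (the
 * vports, or a goto-chain table) are filled in first, an optional flow
 * counter entry is appended after them, and the final value of i is passed
 * to mlx5_add_flow_rules() as the destination count. This is why dest[] is
 * sized MLX5_MAX_FLOW_FWD_VPORTS + 1: worst case is the maximum number of
 * forward vports plus the one counter slot.
 */
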
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i;

	fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.reformat_id = attr->dests[i].encap_id;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	esw->offloads.num_flows++;

	return rule;
add_err:
	esw_put_prio_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
	esw_put_prio_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

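/* Note: the "split" forwarding case uses two tables at the same
 * (chain, prio). The rule installed here in the level-0 table forwards to
 * the first split_count vports and then chains into the level-1 table via
 * the flow-table destination appended after the loop; the companion rule
 * for the remaining actions is expected to be installed there through
 * mlx5_eswitch_add_offloaded_rule() with a nonzero split_count. That
 * caller-side pairing is an inference from the level arguments, not spelled
 * out in this file.
 */
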
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);

	mlx5_del_flow_rules(rule);
	esw->offloads.num_flows--;

	if (fwd_rule) {
		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
	} else {
		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
		if (attr->dest_chain)
			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (rep->rep_if[REP_ETH].state != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules without it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules without vlans (attr->vlan = 0) and with vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're in non-emulation mode for vlan push/pop */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		 !attr->dest_chain);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're in non-emulation mode for vlan push/pop */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

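/* Note: the add/del vlan action pair above emulates per-flow vlan push/pop
 * on hardware that only supports per-vport vlan configuration. The first
 * pushing rule programs the vport vlan, a per-vport refcount lets later
 * rules share it, and a global pop (strip) policy is held for as long as
 * any emulated rule exists. attr->vlan_handled records whether a given flow
 * took part, so the delete path can balance the refcounts exactly.
 */
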
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

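/* Note: "send to vport" rules steer packets that the eswitch manager
 * transmits on a representor's send queue (matched by source_sqn plus the
 * manager's source_port) straight to the represented vport, bypassing the
 * normal FDB lookup; they live in the slow-path table.
 */
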
static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				  misc_parameters);

	MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
		 MLX5_CAP_GEN(peer_dev, vhca_id));

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id);

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_PF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport(esw, i, mlx5_core_max_vfs(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, i);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

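/* Note: peer miss rules serve the merged-eswitch (two PF) topology. A
 * packet that carries the peer device's vhca_id and misses the local FDB is
 * forwarded to the peer's eswitch manager vport, with one rule per possible
 * source vport: PF, ECPF when present, and each VF.
 */
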
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_reverse(esw, i, mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

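/* Note: the two miss rules above key only on the multicast bit of the
 * destination MAC (dmac_c[0] = 0x01 in the criteria). The first rule leaves
 * the bit clear in the value and thus catches unicast misses; the second
 * sets dmac_v[0] = 0x01 in the same spec and catches multicast. Both
 * forward to the eswitch manager vport, which is how unmatched traffic
 * reaches the slow path.
 */
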
#define ESW_OFFLOADS_NUM_GROUPS  4

/* Firmware currently supports 4 pools of 4 sizes (ESW_POOLS), each backed
 * by a virtual memory region of 16M (ESW_SIZE) that is duplicated for every
 * flow table pool. We can allocate up to 16M from each pool, and we track
 * how much has been used via put/get_sz_to_pool.
 * Firmware doesn't report any of this for now.
 * ESW_POOLS is expected to be sorted from large to small.
 */
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
				    64 * 1024, 4 * 1024 };

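/* Illustrative accounting, derived from the constants above rather than
 * stated in the original source: fdb_left[] starts at ESW_SIZE /
 * ESW_POOLS[i] tables per pool, i.e. 4 tables of 4M, 16 of 1M, 256 of 64K
 * and 4096 of 4K entries, and pools whose table size exceeds the firmware's
 * log_max_ft_size are zeroed out when the FDB is created.
 */
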
static int
get_sz_from_pool(struct mlx5_eswitch *esw)
{
	int sz = 0, i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (esw->fdb_table.offloads.fdb_left[i]) {
			--esw->fdb_table.offloads.fdb_left[i];
			sz = ESW_POOLS[i];
			break;
		}
	}

	return sz;
}

static void
put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (sz >= ESW_POOLS[i]) {
			++esw->fdb_table.offloads.fdb_left[i];
			break;
		}
	}
}

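/* Note: get_sz_from_pool() hands out the largest table size that still has
 * budget (ESW_POOLS is sorted large to small), while put_sz_to_pool()
 * credits a freed table back to the first pool whose size it covers, so the
 * pair stays balanced for the exact sizes the get side can return.
 */
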
static struct mlx5_flow_table *
create_next_size_table(struct mlx5_eswitch *esw,
		       struct mlx5_flow_namespace *ns,
		       u16 table_prio,
		       int level,
		       u32 flags)
{
	struct mlx5_flow_table *fdb;
	int sz;

	sz = get_sz_from_pool(esw);
	if (!sz)
		return ERR_PTR(-ENOSPC);

	fdb = mlx5_create_auto_grouped_flow_table(ns,
						  table_prio,
						  sz,
						  ESW_OFFLOADS_NUM_GROUPS,
						  level,
						  flags);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
			 (int)PTR_ERR(fdb), table_prio, level, sz);
		put_sz_to_pool(esw, sz);
	}

	return fdb;
}

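/* Note on esw_get_prio_table() below: entries are refcounted per
 * (chain, prio, level) under fdb_prio_lock, and taking a level also takes a
 * reference on every lower level of the same (chain, prio) so fs_core can
 * connect the tables. The namespace priority is flattened as
 * table_prio = chain * FDB_MAX_PRIO + prio - 1; for example (illustrative,
 * assuming FDB_MAX_PRIO is 16) chain 1, prio 2 maps to table_prio 17.
 */
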
static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_namespace *ns;
	int table_prio, l = 0;
	u32 flags = 0;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return esw->fdb_table.offloads.slow_fdb;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
	if (fdb) {
		/* take ref on earlier levels as well */
		while (level >= 0)
			fdb_prio_table(esw, chain, prio, level--).num_rules++;
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return fdb;
	}

	ns = mlx5_get_fdb_sub_ns(dev, chain);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB sub namespace\n");
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	table_prio = (chain * FDB_MAX_PRIO) + prio - 1;

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables
	 */
	for (l = 0; l <= level; l++) {
		if (fdb_prio_table(esw, chain, prio, l).fdb) {
			fdb_prio_table(esw, chain, prio, l).num_rules++;
			continue;
		}

		fdb = create_next_size_table(esw, ns, table_prio, l, flags);
		if (IS_ERR(fdb)) {
			l--;
			goto err_create_fdb;
		}

		fdb_prio_table(esw, chain, prio, l).fdb = fdb;
		fdb_prio_table(esw, chain, prio, l).num_rules = 1;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	return fdb;

err_create_fdb:
	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	if (l >= 0)
		esw_put_prio_table(esw, chain, prio, l);

	return fdb;
}

static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	int l;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	for (l = level; l >= 0; l--) {
		if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
			continue;

		put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
		mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
		fdb_prio_table(esw, chain, prio, l).fdb = NULL;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
}

static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
	/* If lazy creation isn't supported, deref the fast path tables */
	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
		esw_put_prio_table(esw, 0, 1, 1);
		esw_put_prio_table(esw, 0, 1, 0);
	}
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	u32 *flow_group_in, max_flow_counter;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0, i;
	struct mlx5_flow_group *g;
	u32 flags = 0, fdb_max;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
		  fdb_max);

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
		esw->fdb_table.offloads.fdb_left[i] =
			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* If lazy creation isn't supported, open the fast path tables now */
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
		esw_get_prio_table(esw, 0, 1, 0);
		esw_get_prio_table(esw, 0, 1, 1);
	} else {
		esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
	}

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_port);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_eswitch_owner_vhca_id);

	MLX5_SET(create_flow_group_in, flow_group_in,
		 source_eswitch_owner_vhca_id_valid, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_destroy_offloads_fast_fdb_tables(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

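/* Note: the slow-path FDB created above is sized for nvports *
 * MAX_SQ_NVPORTS + MAX_PF_SQ send-to-vport entries, esw->total_vports peer
 * miss entries and the MLX5_ESW_MISS_FLOWS dmac miss entries, and its flow
 * groups are carved out in exactly that order: send-to-vport, peer miss,
 * then miss.
 */
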
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_tables(esw);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

1160
1161static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
1162{
1163 struct mlx5_esw_offload *offloads = &esw->offloads;
1164
1165 mlx5_destroy_flow_table(offloads->ft_offloads);
1166}
fed9ce22 1167
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

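/* Note: vport rx rules live in the NIC-side MLX5_FLOW_NAMESPACE_OFFLOADS
 * table rather than in the FDB. They match on the eswitch source_port
 * metadata so traffic arriving from a given vport can be steered to the
 * destination the caller supplies, typically the vport representor's
 * receive path (that last point is an inference, not stated here).
 */
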
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN], rep_type;
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vfs,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	mlx5_esw_for_all_reps(esw, vport, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport);
		ether_addr_copy(rep->hw_id, hw_id);

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			rep->rep_if[rep_type].state = REP_UNREGISTERED;
	}

	return 0;
}

static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (rep->rep_if[rep_type].state != REP_LOADED)
		return;

	rep->rep_if[rep_type].unload(rep);
	rep->rep_if[rep_type].state = REP_REGISTERED;
}

static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				   u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
				    u8 rep_type)
{
	__unload_reps_vf_vport(esw, nvports, rep_type);

	/* Special vports must be the last to unload. */
	__unload_reps_special_vport(esw, rep_type);
}

static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, nvports, rep_type);
}

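/* Note: each rep_if follows a small per-rep-type state machine:
 * REP_UNREGISTERED -> REP_REGISTERED (a driver registered its callbacks) ->
 * REP_LOADED (load() succeeded). __esw_offloads_load_rep() and
 * __esw_offloads_unload_rep() are no-ops unless the rep is in the expected
 * state, which makes the bulk load/unload loops safe to run over sparsely
 * registered reps.
 */
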
static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
				   struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	int err = 0;

	if (rep->rep_if[rep_type].state != REP_REGISTERED)
		return 0;

	err = rep->rep_if[rep_type].load(esw->dev, rep);
	if (err)
		return err;

	rep->rep_if[rep_type].state = REP_LOADED;

	return 0;
}

static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	err = __esw_offloads_load_rep(esw, rep, rep_type);
	if (err)
		return err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_pf;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_ecpf;
	}

	return 0;

err_ecpf:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

err_pf:
	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err, i;

	mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_vf;
	}

	return 0;

err_vf:
	__unload_reps_vf_vport(esw, --i, rep_type);
	return err;
}

static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_vf_vport(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
	return err;
}

static int __load_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
				 u8 rep_type)
{
	int err;

	/* Special vports must be loaded first. */
	err = __load_reps_special_vport(esw, rep_type);
	if (err)
		return err;

	err = __load_reps_vf_vport(esw, nvports, rep_type);
	if (err)
		goto err_vfs;

	return 0;

err_vfs:
	__unload_reps_special_vport(esw, rep_type);
	return err;
}

static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_all_vport(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, nvports, rep_type);
	return err;
}

#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
	mlx5e_tc_clean_fdb_peer_flows(esw);
	esw_del_fdb_peer_miss_rules(esw);
}

static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_eswitch *peer_esw = event_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_out;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);

err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}

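/* Note: pairing is installed symmetrically above, esw -> peer and then
 * peer -> esw; a failure in the second direction unwinds the first before
 * reporting, and the devcom "paired" flag is only set once both directions
 * hold their peer miss rules.
 */
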
static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
{
	int err;

	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		return err;

	err = esw_create_offloads_table(esw, nvports);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw, nvports);
	if (err)
		goto create_fg_err;

	return 0;

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

	return err;
}

static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
}

static void esw_host_params_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	int err, num_vf = 0;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	err = mlx5_query_host_params_num_vfs(esw->dev, &num_vf);
	if (err || num_vf == esw->host_info.num_vfs)
		goto out;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->host_info.num_vfs > 0) {
		esw_offloads_unload_vf_reps(esw, esw->host_info.num_vfs);
	} else {
		err = esw_offloads_load_vf_reps(esw, num_vf);

		if (err)
			goto out;
	}

	esw->host_info.num_vfs = num_vf;

out:
	kfree(host_work);
}

static int esw_host_params_event(struct notifier_block *nb,
				 unsigned long type, void *data)
{
	struct mlx5_host_work *host_work;
	struct mlx5_host_info *host_info;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	host_info = mlx5_nb_cof(nb, struct mlx5_host_info, nb);
	esw = container_of(host_info, struct mlx5_eswitch, host_info);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_host_params_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}

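/* Note: HOST_PARAMS_CHANGE arrives in atomic (EQ notifier) context, hence
 * the GFP_ATOMIC allocation and the bounce through the eswitch work queue;
 * the actual VF rep load/unload then runs from process context where it may
 * sleep.
 */
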
int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
		      int total_nvports)
{
	int err;

	err = esw_offloads_steering_init(esw, total_nvports);
	if (err)
		return err;

	err = esw_offloads_load_all_reps(esw, vf_nvports);
	if (err)
		goto err_reps;

	esw_offloads_devcom_init(esw);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		MLX5_NB_INIT(&esw->host_info.nb, esw_host_params_event,
			     HOST_PARAMS_CHANGE);
		mlx5_eq_notifier_register(esw->dev, &esw->host_info.nb);
		esw->host_info.num_vfs = vf_nvports;
	}

	return 0;

err_reps:
	esw_offloads_steering_cleanup(esw);
	return err;
}

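/* Switch the eswitch back to legacy mode. If enabling legacy SRIOV
 * fails, try to restore offloads mode so the device is not left without
 * an eswitch; the original error is still returned to the caller.
 */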
static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}

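/* On an ECPF eswitch manager the VF count is owned by the host-params
 * handler, so unregister the notifier and flush the work queue before
 * reading host_info.num_vfs; otherwise use the local SRIOV VF count.
 */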
void esw_offloads_cleanup(struct mlx5_eswitch *esw)
{
	u16 num_vfs;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		mlx5_eq_notifier_unregister(esw->dev, &esw->host_info.nb);
		flush_workqueue(esw->work_queue);
		num_vfs = esw->host_info.num_vfs;
	} else {
		num_vfs = esw->dev->priv.sriov.num_vfs;
	}

	esw_offloads_devcom_cleanup(esw);
	esw_offloads_unload_all_reps(esw, num_vfs);
	esw_offloads_steering_cleanup(esw);
}

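/* Translate between the devlink eswitch mode UAPI values and the
 * driver's internal modes: legacy <-> SRIOV_LEGACY and
 * switchdev <-> SRIOV_OFFLOADS.
 */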
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

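/* Minimum WQE inline mode mapping, devlink UAPI <-> mlx5: none <-> NONE,
 * link <-> L2, network <-> IP, transport <-> TCP_UDP.
 */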
static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

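/* Common sanity checks for all devlink eswitch callbacks: an Ethernet
 * port, eswitch manager privileges, and an initialized eswitch mode
 * (unless this is an ECPF eswitch manager, which has no local SRIOV
 * state).
 */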
static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	if (dev->priv.eswitch->mode == SRIOV_NONE &&
	    !mlx5_core_is_ecpf_esw_manager(dev))
		return -EOPNOTSUPP;

	return 0;
}

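/* devlink "eswitch mode" set callback. Typical usage from userspace
 * (example only; the PCI address is hypothetical):
 *
 *   devlink dev eswitch set pci/0000:06:00.0 mode switchdev
 *   devlink dev eswitch set pci/0000:06:00.0 mode legacy
 *
 * Changing the mode tears down and rebuilds the eswitch via
 * esw_offloads_start()/esw_offloads_stop().
 */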
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch, extack);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

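/* Set the minimum WQE inline mode on all enabled vports. This is only
 * configurable when the device reports per-vport-context inline mode,
 * and is refused while offloaded flows exist; on a partial failure the
 * already-updated vports are reverted to the previous mode.
 */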
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

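/* Resolve the effective minimum inline mode: fixed by the device
 * capability when possible, otherwise queried per VF vport, in which
 * case all vports must agree or -EINVAL is returned.
 */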
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

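/* Enable or disable basic encap/decap offload. Requires the FDB reformat
 * and decap capabilities, and cannot be changed while flows are
 * offloaded, because the flag only takes effect when the fast-path FDB
 * tables are (re)created.
 */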
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw, esw->nvports);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
	}

	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

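/* Copy a representor interface (load/unload/get_proto_dev callbacks and
 * private data) to every vport rep of the given type and mark them
 * REP_REGISTERED; called by rep consumers such as the mlx5e driver.
 */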
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep_if *__rep_if,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_if *rep_if;
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_all_reps(esw, i, rep) {
		rep_if = &rep->rep_if[rep_type];
		rep_if->load = __rep_if->load;
		rep_if->unload = __rep_if->unload;
		rep_if->get_proto_dev = __rep_if->get_proto_dev;
		rep_if->priv = __rep_if->priv;

		rep_if->state = REP_REGISTERED;
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	u16 max_vf = mlx5_core_max_vfs(esw->dev);
	struct mlx5_eswitch_rep *rep;
	int i;

	if (esw->mode == SRIOV_OFFLOADS)
		__unload_reps_all_vport(esw, max_vf, rep_type);

	mlx5_esw_for_all_reps(esw, i, rep)
		rep->rep_if[rep_type].state = REP_UNREGISTERED;
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);

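/* The getters below expose a rep's private data and protocol device;
 * get_proto_dev() is only forwarded once the rep is REP_LOADED.
 */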
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_if[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 int vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (rep->rep_if[rep_type].state == REP_LOADED &&
	    rep->rep_if[rep_type].get_proto_dev)
		return rep->rep_if[rep_type].get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						int vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);