/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)

#define fdb_prio_table(esw, chain, prio, level) \
	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]

#define UPLINK_REP_INDEX 0

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);

bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
{
	return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
}

u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_CHAIN;

	return 0;
}

u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_PRIO;

	return 1;
}

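/* Source-port matching works in one of two modes: when vport metadata
 * matching is enabled, rules match on the per-vport value that ingress
 * processing placed in metadata register C0 (misc_parameters_2);
 * otherwise they match on the (source_port, source_eswitch_owner_vhca_id)
 * pair in misc_parameters.
 */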
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
								   attr->in_rep->vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc2, metadata_reg_c_0);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}

	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr->in_rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}

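/* Translate an offloaded flow into an FDB rule: build the destination
 * list (forwarding vports with optional encap, a goto-chain table, and
 * an optional flow counter), add the source-port match, then insert the
 * rule into the per-(chain, prio) table taken via esw_get_prio_table().
 * Every error path drops the table references it took.
 */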
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		if (attr->dest_chain) {
			struct mlx5_flow_table *ft;

			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.pkt_reformat =
						attr->dests[j].pkt_reformat;
				}
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	return rule;

err_add_rule:
	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
	if (attr->dest_chain)
		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i;

	fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
add_err:
	esw_put_prio_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
	esw_put_prio_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);
	int i;

	mlx5_del_flow_rules(rule);

	/* unref the term table */
	for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
		if (attr->dests[i].termtbl)
			mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule) {
		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
	} else {
		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
		if (attr->dest_chain)
			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules without it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules without vlans (attr->vlan = 0) && with vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		   !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

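/* "Send to vport" rules match packets by the send queue number (source_sqn)
 * of a representor SQ, with the eswitch manager as the source vport, and
 * forward them to the represented vport; this is how traffic transmitted
 * on a representor netdev reaches its VF.
 */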
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 fdb_to_vport_reg_c_id;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
						   out, sizeof(out));
	if (err)
		return err;

	fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
					 esw_vport_context.fdb_to_vport_reg_c_id);

	if (enable)
		fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
	else
		fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;

	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.fdb_to_vport_reg_c_id, 1);

	return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
						     in, sizeof(in));
}

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

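/* Peer miss rules (merged eswitch): one rule per vport of the peer device,
 * matching on the peer's source port (or metadata) and forwarding to the
 * peer eswitch's manager vport, so traffic that missed here is handled by
 * the representors on the other side.
 */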
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}

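/* The two slow path miss rules match only the multicast bit of the
 * destination MAC (dmac_c[0] = 0x01): with a match value of 0 the first
 * rule catches unicast misses, and with the bit set the second catches
 * multicast. Both forward to the eswitch manager vport.
 */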
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

/* Firmware currently has 4 pools of 4 sizes that it supports (ESW_POOLS),
 * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
 * for each flow table pool. We can allocate up to 16M of each pool,
 * and we keep track of how much we used via put/get_sz_to_pool.
 * Firmware doesn't report any of this for now.
 * ESW_POOLS is expected to be sorted from large to small.
 */
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
				    64 * 1024, 4 * 1024 };

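/* Worked example: when the device's log_max_ft_size allows all pool sizes,
 * fdb_left[] starts as ESW_SIZE / ESW_POOLS[i] = {4, 16, 256, 4096}
 * (see esw_create_offloads_fdb_tables), so the first four tables created
 * get 4M entries, the next sixteen get 1M, and so on down the pools.
 * put_sz_to_pool() returns the budget when a table is destroyed.
 */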
static int
get_sz_from_pool(struct mlx5_eswitch *esw)
{
	int sz = 0, i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (esw->fdb_table.offloads.fdb_left[i]) {
			--esw->fdb_table.offloads.fdb_left[i];
			sz = ESW_POOLS[i];
			break;
		}
	}

	return sz;
}

static void
put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (sz >= ESW_POOLS[i]) {
			++esw->fdb_table.offloads.fdb_left[i];
			break;
		}
	}
}

static struct mlx5_flow_table *
create_next_size_table(struct mlx5_eswitch *esw,
		       struct mlx5_flow_namespace *ns,
		       u16 table_prio,
		       int level,
		       u32 flags)
{
	struct mlx5_flow_table *fdb;
	int sz;

	sz = get_sz_from_pool(esw);
	if (!sz)
		return ERR_PTR(-ENOSPC);

	fdb = mlx5_create_auto_grouped_flow_table(ns,
						  table_prio,
						  sz,
						  ESW_OFFLOADS_NUM_GROUPS,
						  level,
						  flags);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
			 (int)PTR_ERR(fdb), table_prio, level, sz);
		put_sz_to_pool(esw, sz);
	}

	return fdb;
}

923esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
69697b6e 924{
69697b6e 925 struct mlx5_core_dev *dev = esw->dev;
69697b6e 926 struct mlx5_flow_table *fdb = NULL;
e52c2802
PB
927 struct mlx5_flow_namespace *ns;
928 int table_prio, l = 0;
bbd00f7e 929 u32 flags = 0;
69697b6e 930
c92a0b94
PB
931 if (chain == FDB_SLOW_PATH_CHAIN)
932 return esw->fdb_table.offloads.slow_fdb;
933
e52c2802 934 mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
264d7bf3 935
e52c2802
PB
936 fdb = fdb_prio_table(esw, chain, prio, level).fdb;
937 if (fdb) {
938 /* take ref on earlier levels as well */
939 while (level >= 0)
940 fdb_prio_table(esw, chain, prio, level--).num_rules++;
941 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
942 return fdb;
943 }
69697b6e 944
e52c2802
PB
945 ns = mlx5_get_fdb_sub_ns(dev, chain);
946 if (!ns) {
947 esw_warn(dev, "Failed to get FDB sub namespace\n");
948 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
949 return ERR_PTR(-EOPNOTSUPP);
950 }
a842dd04 951
7768d197 952 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
60786f09 953 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
61444b45 954 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
bbd00f7e 955
e52c2802 956 table_prio = (chain * FDB_MAX_PRIO) + prio - 1;
69697b6e 957
e52c2802
PB
958 /* create earlier levels for correct fs_core lookup when
959 * connecting tables
960 */
961 for (l = 0; l <= level; l++) {
962 if (fdb_prio_table(esw, chain, prio, l).fdb) {
963 fdb_prio_table(esw, chain, prio, l).num_rules++;
964 continue;
965 }
a842dd04 966
e52c2802
PB
967 fdb = create_next_size_table(esw, ns, table_prio, l, flags);
968 if (IS_ERR(fdb)) {
969 l--;
970 goto err_create_fdb;
971 }
972
973 fdb_prio_table(esw, chain, prio, l).fdb = fdb;
974 fdb_prio_table(esw, chain, prio, l).num_rules = 1;
a842dd04 975 }
a842dd04 976
e52c2802
PB
977 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
978 return fdb;
a842dd04 979
e52c2802
PB
980err_create_fdb:
981 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
982 if (l >= 0)
983 esw_put_prio_table(esw, chain, prio, l);
984
985 return fdb;
1967ce6e
OG
986}
987
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	int l;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	for (l = level; l >= 0; l--) {
		if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
			continue;

		put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
		mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
		fdb_prio_table(esw, chain, prio, l).fdb = NULL;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
}

static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
	/* If lazy creation isn't supported, deref the fast path tables */
	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
		esw_put_prio_table(esw, 0, 1, 1);
		esw_put_prio_table(esw, 0, 1, 0);
	}
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters_2.metadata_reg_c_0);
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

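/* The slow path FDB created below is carved into three flow groups, in
 * this order of flow indexes (derived from the index math in the function):
 *   [0 .. nvports * MAX_SQ_NVPORTS + MAX_PF_SQ - 1]  send-to-vport rules
 *   [ix .. ix + total_vports - 1]                    peer eswitch miss rules
 *   [ix .. ix + MLX5_ESW_MISS_FLOWS]                 unicast/multicast miss
 */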
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	u32 *flow_group_in, max_flow_counter;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0, i;
	struct mlx5_flow_group *g;
	u32 flags = 0, fdb_max;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
		  fdb_max);

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
		esw->fdb_table.offloads.fdb_left[i] =
			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* If lazy creation isn't supported, open the fast path tables now */
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
		esw_get_prio_table(esw, 0, 1, 0);
		esw_get_prio_table(esw, 0, 1, 1);
	} else {
		esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_destroy_offloads_fast_fdb_tables(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_tables(esw);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	if (esw->mode != MLX5_ESWITCH_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable(esw);
	mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = esw->total_vports;
	struct mlx5_eswitch_rep *rep;
	int vport_index;
	u8 rep_type;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_esw_for_all_reps(esw, vport_index, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
		rep->vport_index = vport_index;

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}

static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				   u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	__unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);

	/* Special vports must be the last to unload. */
	__unload_reps_special_vport(esw, rep_type);
}

static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, rep_type);
}

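/* A rep moves from REP_UNREGISTERED to REP_REGISTERED when its rep_ops are
 * registered, and between REP_REGISTERED and REP_LOADED around
 * ->load()/->unload(). The atomic_cmpxchg() guards make the load and unload
 * paths safe to call for reps that are not in the expected state.
 */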
static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
				   struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	int err = 0;

	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
		err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
		if (err)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_REGISTERED);
	}

	return err;
}

static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	err = __esw_offloads_load_rep(esw, rep, rep_type);
	if (err)
		return err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_pf;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_ecpf;
	}

	return 0;

err_ecpf:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

err_pf:
	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err, i;

	mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_vf;
	}

	return 0;

err_vf:
	__unload_reps_vf_vport(esw, --i, rep_type);
	return err;
}

static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	int err;

	/* Special vports must be loaded first, uplink rep creates mdev resource. */
	err = __load_reps_special_vport(esw, rep_type);
	if (err)
		return err;

	err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
	if (err)
		goto err_vfs;

	return 0;

err_vfs:
	__unload_reps_special_vport(esw, rep_type);
	return err;
}

static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_vf_vport(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
	return err;
}

static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_all_vport(esw, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, rep_type);
	return err;
}

#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
	mlx5e_tc_clean_fdb_peer_flows(esw);
	esw_del_fdb_peer_miss_rules(esw);
}

static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL);
		mlx5_flow_namespace_set_peer(peer_ns, NULL);
	}

	return 0;
}

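/* devcom event handler: pair or unpair this eswitch with the peer passed
 * in event_data. Pairing is skipped unless both sides agree on whether
 * vport match metadata is enabled.
 */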
static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	struct mlx5_eswitch *peer_esw = event_data;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;

		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);

err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);

err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}

static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

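/* With prio_tag_required set, the ingress ACL pushes a zero-VID (priority
 * tag) VLAN onto untagged packets arriving from a VF; the matching egress
 * rule (see esw_vport_egress_config) pops it again so the VF keeps seeing
 * untagged traffic.
 */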
static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* For prio tag mode, there is only one FTE:
	 * 1) Untagged packets - push prio tag VLAN and modify metadata if
	 *    required, allow
	 * Unmatched traffic is allowed by default.
	 */

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out_no_mem;
	}

	/* Untagged packets - push prio tag VLAN, allow */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.vlan[0].ethtype = ETH_P_8021Q;
	flow_act.vlan[0].vid = 0;
	flow_act.vlan[0].prio = 0;

	if (vport->ingress.offloads.modify_metadata_rule) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	}

	vport->ingress.allow_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
		goto out;
	}

out:
	kvfree(spec);
out_no_mem:
	if (err)
		esw_vport_cleanup_ingress_rules(esw, vport);
	return err;
}

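/* Install a match-all ingress ACL rule that writes this vport's metadata
 * value into REG_C_0, so downstream FDB rules can match on the register
 * rather than on the source vport directly.
 */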
static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
						     struct mlx5_vport *vport)
{
	u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
	static const struct mlx5_flow_spec spec = {};
	struct mlx5_flow_act flow_act = {};
	int err = 0;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
	MLX5_SET(set_action_in, action, data,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));

	vport->ingress.offloads.modify_metadata =
		mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
					 1, action);
	if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata);
		esw_warn(esw->dev,
			 "failed to alloc modify header for vport %d ingress acl (%d)\n",
			 vport->vport, err);
		return err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	vport->ingress.offloads.modify_metadata_rule =
		mlx5_add_flow_rules(vport->ingress.acl,
				    &spec, &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
		esw_warn(esw->dev,
			 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
			 vport->vport, err);
		vport->ingress.offloads.modify_metadata_rule = NULL;
		goto out;
	}

out:
	if (err)
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
	return err;
}

static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
						      struct mlx5_vport *vport)
{
	if (vport->ingress.offloads.modify_metadata_rule) {
		mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);

		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
}

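/* (Re)build the ingress ACL for one vport: the metadata rewrite rule when
 * vport match metadata is enabled, plus the prio tag rule on VF vports
 * when the device requires prio tagging.
 */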
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	    !MLX5_CAP_GEN(esw->dev, prio_tag_required))
		return 0;

	esw_vport_cleanup_ingress_rules(esw, vport);

	err = esw_vport_enable_ingress_acl(esw, vport);
	if (err) {
		esw_warn(esw->dev,
			 "failed to enable ingress acl (%d) on vport[%d]\n",
			 err, vport->vport);
		return err;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules\n", vport->vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
		if (err)
			goto out;
	}

	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
	    mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
		err = esw_vport_ingress_prio_tag_config(esw, vport);
		if (err)
			goto out;
	}

out:
	if (err)
		esw_vport_disable_ingress_acl(esw, vport);
	return err;
}

static int esw_vport_egress_config(struct mlx5_eswitch *esw,
				   struct mlx5_vport *vport)
{
	int err;

	if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
		return 0;

	esw_vport_cleanup_egress_rules(esw, vport);

	err = esw_vport_enable_egress_acl(esw, vport);
	if (err)
		return err;

	/* For prio tag mode, there is only one FTE:
	 * 1) prio tag packets - pop the prio tag VLAN, allow
	 * Unmatched traffic is allowed by default.
	 */
	esw_debug(esw->dev,
		  "vport[%d] configure prio tag egress rules\n", vport->vport);

	/* prio tag vlan rule - pop it so the VF receives untagged packets */
	err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0,
						    MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
						    MLX5_FLOW_CONTEXT_ACTION_ALLOW);
	if (err)
		esw_vport_disable_egress_acl(esw, vport);

	return err;
}

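/* Vport match metadata requires an uplink ingress ACL, FDB-to-vport
 * REG_C_0 propagation and the flow_source capability, and is left
 * disabled on ECPF-managed devices.
 */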
static bool
esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
	    mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_vport_ingress_config(esw, vport);
	if (err)
		return err;

	if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
		err = esw_vport_egress_config(esw, vport);
		if (err) {
			esw_vport_del_ingress_acl_modify_metadata(esw, vport);
			esw_vport_disable_ingress_acl(esw, vport);
		}
	}
	return err;
}

void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_vport_disable_egress_acl(esw, vport);
	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
	esw_vport_disable_ingress_acl(esw, vport);
}

static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err;

	if (esw_check_vport_match_metadata_supported(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	err = esw_vport_create_offloads_acl_tables(esw, vport);
	if (err)
		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
	return err;
}

static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	esw_vport_destroy_offloads_acl_tables(esw, vport);
	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}

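/* Create the offloads steering objects in dependency order: uplink ACL
 * tables, then the fast-path FDB tables, the offloads table and the vport
 * RX flow group. Errors unwind in reverse creation order.
 */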
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	int num_vfs = esw->esw_funcs.num_vfs;
	int total_vports;
	int err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		total_vports = esw->total_vports;
	else
		total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
		return err;

	err = esw_create_offloads_fdb_tables(esw, total_vports);
	if (err)
		goto create_fdb_err;

	err = esw_create_offloads_table(esw, total_vports);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw, total_vports);
	if (err)
		goto create_fg_err;

	return 0;

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

create_fdb_err:
	esw_destroy_uplink_offloads_acl_tables(esw);

	return err;
}

static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_uplink_offloads_acl_tables(esw);
}

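/* React to a change in the number of host VFs reported by firmware. An
 * unchanged count or a disabled host PF is ignored; otherwise the count
 * only moves between zero and a fixed value, so the VF reps are either
 * all unloaded or all loaded.
 */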
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	bool host_pf_disabled;
	u16 new_num_vfs;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
	} else {
		int err;

		err = esw_offloads_load_vf_reps(esw, new_num_vfs);
		if (err)
			return;
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
}

static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}

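/* ESW_FUNCTIONS_CHANGED notifier. It may run in atomic context, hence the
 * GFP_ATOMIC allocation; the firmware query and rep reload are deferred
 * to the eswitch work queue.
 */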
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}

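/* Bring the eswitch into offloads mode: choose the default encap setting
 * from the FDB reformat/decap caps, enable RoCE, build the steering
 * objects, start passing vport metadata, enable the PF/VF vports and load
 * all representors; finally attempt devcom pairing with a peer eswitch.
 */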
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	int err;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	mlx5_rdma_enable_roce(esw->dev);
	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	err = esw_offloads_load_all_reps(esw);
	if (err)
		goto err_reps;

	esw_offloads_devcom_init(esw);
	mutex_init(&esw->offloads.termtbl_mutex);

	return 0;

err_reps:
	mlx5_eswitch_disable_pf_vf_vports(esw);
err_vports:
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	mlx5_rdma_disable_roce(esw->dev);
	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable(esw);
	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}

void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_devcom_cleanup(esw);
	esw_offloads_unload_all_reps(esw);
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	mlx5_rdma_disable_roce(esw->dev);
	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

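/* Common validation for the devlink eswitch callbacks: an Ethernet port,
 * eswitch manager privilege, and either an initialized eswitch or an ECPF
 * eswitch manager are required.
 */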
static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
	    !mlx5_core_is_ecpf_esw_manager(dev))
		return -EOPNOTSUPP;

	return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch, extack);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

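/* Set the minimum inline mode on every host function vport. Only legal
 * when the device leaves the choice per vport (VPORT_CONTEXT) and no
 * offloaded flows exist; already-updated vports are reverted on failure.
 */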
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport, num_vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	num_vport = --vport;
	mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

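/* Changing the encap mode requires tearing down and re-creating the fast
 * FDB tables, so it is refused while offloaded flows exist. In legacy
 * mode the value is only recorded for the next switch to offloads.
 */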
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw, esw->nvports);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
	}

	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

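/* Rep (un)registration API for rep users such as the Ethernet and IB
 * drivers: registering stores the ops table and marks every rep
 * REP_REGISTERED; unregistering unloads the reps first when the eswitch
 * is still in offloads mode.
 */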
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	int i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_all_reps(esw, i, rep) {
		rep_data = &rep->rep_data[rep_type];
		atomic_set(&rep_data->state, REP_REGISTERED);
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_all_reps(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);

bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num >= MLX5_VPORT_FIRST_VF &&
	       vport_num <= esw->dev->priv.sriov.max_vfs;
}

bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);

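/* REG_C_0 match value layout, as encoded below:
 *   bits 31..16 - low 16 bits of this device's vhca_id
 *   bits 15..0  - vport number
 * Including the vhca_id presumably keeps values distinct between paired
 * eswitches.
 */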
u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	return ((MLX5_CAP_GEN(esw->dev, vhca_id) & 0xffff) << 16) | vport_num;
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);